code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
class RestServer:
    """Service object-model builder for the PAI rest-server component.

    Merges the default service configuration with the user-supplied one,
    validates the cluster layout, and renders the final configuration
    dictionary consumed by the deployment pipeline.
    """

    def __init__(self, cluster_configuration, service_configuration, default_service_configuraiton):
        # NOTE(review): "configuraiton" typo is kept -- renaming the parameter
        # would break callers that pass it by keyword.
        self.cluster_configuration = cluster_configuration
        # User-supplied keys override the defaults.
        self.service_configuration = dict(default_service_configuraiton,
                                          **service_configuration)

    # First check: ensure the configured data in cluster_configuration /
    # service_configuration / default_service_configuration is complete.
    def validation_pre(self):
        """Validate the input configuration; return (ok, error_message)."""
        if 'default-pai-admin-username' not in self.service_configuration:
            return False, '"default-pai-admin-username" is required in rest-server'
        if 'default-pai-admin-password' not in self.service_configuration:
            return False, '"default-pai-admin-password" is required in rest-server'
        machine_list = self.cluster_configuration['machine-list']
        # etcd needs at least one Kubernetes master node.
        if all(host.get('k8s-role') != 'master' for host in machine_list):
            return False, 'At least 1 "k8s-role=master" labeled machine is required to deploy the etcd'
        # Exactly one machine may host the rest server.
        if len([host for host in machine_list if host.get('restserver') == 'true']) != 1:
            return False, '1 and only 1 "restserver=true" machine is required to deploy the rest server'
        return True, None

    # Generate the final service object model.
    def run(self):
        """Build and return the rest-server service object-model dict."""
        machine_list = self.cluster_configuration['machine-list']
        # Indexing [0] is safe: validation_pre guarantees exactly one such host.
        master_ip = [host['hostip'] for host in machine_list if host.get('restserver') == 'true'][0]
        server_port = self.service_configuration['server-port']
        service_object_model = dict()
        service_object_model['uri'] = 'http://{0}:{1}'.format(master_ip, server_port)
        service_object_model['server-port'] = server_port
        service_object_model['jwt-secret'] = self.service_configuration['jwt-secret']
        service_object_model['default-pai-admin-username'] = self.service_configuration['default-pai-admin-username']
        service_object_model['default-pai-admin-password'] = self.service_configuration['default-pai-admin-password']
        service_object_model['github-owner'] = self.service_configuration['github-owner']
        service_object_model['github-repository'] = self.service_configuration['github-repository']
        service_object_model['github-path'] = self.service_configuration['github-path']
        # etcd endpoints: port 4001 on every k8s master node.
        service_object_model['etcd-uris'] = ','.join('http://{0}:4001'.format(host['hostip'])
                                                     for host in machine_list
                                                     if host.get('k8s-role') == 'master')
        return service_object_model

    # Post-generation check: object models of services this component depends
    # on must exist and be well-formed.
    def validation_post(self, cluster_object_model):
        """Check cross-service dependencies; return (ok, error_message)."""
        if 'yarn-frameworklauncher' not in cluster_object_model or 'webservice' not in cluster_object_model['yarn-frameworklauncher']:
            return False, 'yarn-frameworklauncher.webservice is required'
        if 'hadoop-name-node' not in cluster_object_model or 'master-ip' not in cluster_object_model['hadoop-name-node']:
            return False, 'hadoop-name-node.master-ip is required'
        if 'hadoop-resource-manager' not in cluster_object_model or 'master-ip' not in cluster_object_model['hadoop-resource-manager']:
            return False, 'hadoop-resource-manager.master-ip is required'
        return True, None | src/rest-server/config/rest_server.py |
class RestServer:
    """Service object-model builder for the PAI rest-server component.

    (Duplicate of the row's `code` column.) Merges the default service
    configuration with the user-supplied one, validates the cluster layout,
    and renders the final configuration dictionary for deployment.
    """

    def __init__(self, cluster_configuration, service_configuration, default_service_configuraiton):
        # NOTE(review): "configuraiton" typo is kept -- renaming the parameter
        # would break callers that pass it by keyword.
        self.cluster_configuration = cluster_configuration
        # User-supplied keys override the defaults.
        self.service_configuration = dict(default_service_configuraiton,
                                          **service_configuration)

    # First check: ensure the configured data in cluster_configuration /
    # service_configuration / default_service_configuration is complete.
    def validation_pre(self):
        """Validate the input configuration; return (ok, error_message)."""
        if 'default-pai-admin-username' not in self.service_configuration:
            return False, '"default-pai-admin-username" is required in rest-server'
        if 'default-pai-admin-password' not in self.service_configuration:
            return False, '"default-pai-admin-password" is required in rest-server'
        machine_list = self.cluster_configuration['machine-list']
        # etcd needs at least one Kubernetes master node.
        if all(host.get('k8s-role') != 'master' for host in machine_list):
            return False, 'At least 1 "k8s-role=master" labeled machine is required to deploy the etcd'
        # Exactly one machine may host the rest server.
        if len([host for host in machine_list if host.get('restserver') == 'true']) != 1:
            return False, '1 and only 1 "restserver=true" machine is required to deploy the rest server'
        return True, None

    # Generate the final service object model.
    def run(self):
        """Build and return the rest-server service object-model dict."""
        machine_list = self.cluster_configuration['machine-list']
        # Indexing [0] is safe: validation_pre guarantees exactly one such host.
        master_ip = [host['hostip'] for host in machine_list if host.get('restserver') == 'true'][0]
        server_port = self.service_configuration['server-port']
        service_object_model = dict()
        service_object_model['uri'] = 'http://{0}:{1}'.format(master_ip, server_port)
        service_object_model['server-port'] = server_port
        service_object_model['jwt-secret'] = self.service_configuration['jwt-secret']
        service_object_model['default-pai-admin-username'] = self.service_configuration['default-pai-admin-username']
        service_object_model['default-pai-admin-password'] = self.service_configuration['default-pai-admin-password']
        service_object_model['github-owner'] = self.service_configuration['github-owner']
        service_object_model['github-repository'] = self.service_configuration['github-repository']
        service_object_model['github-path'] = self.service_configuration['github-path']
        # etcd endpoints: port 4001 on every k8s master node.
        service_object_model['etcd-uris'] = ','.join('http://{0}:4001'.format(host['hostip'])
                                                     for host in machine_list
                                                     if host.get('k8s-role') == 'master')
        return service_object_model

    # Post-generation check: object models of services this component depends
    # on must exist and be well-formed.
    def validation_post(self, cluster_object_model):
        """Check cross-service dependencies; return (ok, error_message)."""
        if 'yarn-frameworklauncher' not in cluster_object_model or 'webservice' not in cluster_object_model['yarn-frameworklauncher']:
            return False, 'yarn-frameworklauncher.webservice is required'
        if 'hadoop-name-node' not in cluster_object_model or 'master-ip' not in cluster_object_model['hadoop-name-node']:
            return False, 'hadoop-name-node.master-ip is required'
        if 'hadoop-resource-manager' not in cluster_object_model or 'master-ip' not in cluster_object_model['hadoop-resource-manager']:
            return False, 'hadoop-resource-manager.master-ip is required'
        return True, None | 0.604516 | 0.105671 |
import os
import string
import nltk # type: ignore
import re
from nltk import FreqDist
from nltk.corpus import brown, stopwords # type: ignore
from typing import List
from langcreator.common import Generators, InputOutput, InputOutputGenerator, get_tags, choice, builtin_generators
# Corpora needed for the frequency list and stopword filtering; quiet=True
# suppresses download progress output.
nltk.download('brown', quiet=True)
nltk.download('stopwords', quiet=True)
# Type keywords of the generated toy language.
value_types = ['string', 'number', 'written_number', 'variable', 'list']
# Spelled-out numerals zero..ten.
numbers_written = [
    'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
    'nine', 'ten'
]
# Word frequencies over the Brown corpus; words of length <= 2 are skipped.
frequency_list = FreqDist(w.lower() for w in brown.words() if len(w) > 2)
# Words that must not be used as identifiers in generated samples.
reserved_words = set([
    "get", "if", "while", "for", "break", "continue", "end", "any", "and",
    "or", "remove", "delete", "set", "between", "same", "greater", "smaller",
    "equals", "use", "set", "let", "equals", "be", "to", "is", "do", "done",
    "exit", "break", "continue", "times", "end", "let", "remove", "update",
    "jump", "go", "each", "switch", "case", "replace", "match", "small",
    "greater", "than"
] + numbers_written + value_types)
stopwords_en = stopwords.words('english')
letters = list(string.ascii_lowercase)
# Identifier pool: common lowercase ASCII words (frequency ranks 100-300)
# that are neither reserved words nor stopwords, plus the letters a-z.
names = [
    w for w, _ in frequency_list.most_common()[100:300] if w not in
    reserved_words and w not in stopwords_en and re.match("^[a-z]+$", w)
] + letters
# Numeric literal pool 0..100.
numbers = list(range(101))
def generate_samples(generators: Generators, n: int) -> List[InputOutput]:
    """Draw ``n`` random (input, output) samples from ``generators``."""
    keys = list(generators.keys())
    samples: List[InputOutput] = []
    for _ in range(n):
        samples.append(_generate_sample(generators, choice(keys)))
    return samples
def _generate_sample(generators: Generators, key: str) -> InputOutput:
    """Generate one (input, output) pair for generator ``key``.

    Quotes are stripped from the key; builtin generators are dispatched
    directly, dict generators produce template-based samples, and list
    generators delegate recursively to a randomly chosen sub-key.

    Raises:
        Exception: if the generator is neither a dict nor a list.
    """
    key = key.replace("'", "")
    if key in builtin_generators:
        return _generate_builtin(key)
    generator = generators[key]
    # isinstance instead of `type(...) ==`: idiomatic and accepts subclasses.
    if isinstance(generator, dict):
        return _generate_input_output_sample(generators, generator)
    if isinstance(generator, list):
        return _generate_sample(generators, choice(generator))
    raise Exception(f"Invalid generator {key}")
def _generate_builtin(key: str):
    """Produce an (input, output) pair for a builtin generator key.

    Builtin values render identically on the input and output side.
    """
    if key == "int":
        value = str(choice(numbers))
    elif key == "name":
        value = _generate_name()
    elif key == "float":
        value = str(choice(numbers) / choice([10, 100]))
    elif key == "string":
        value = _generate_string()
    else:
        raise Exception(f"Builtin generator for {key} not implemented yet")
    return (value, value)
def _generate_input_output_sample(
        generators: Generators,
        generator: InputOutputGenerator) -> InputOutput:
    """Instantiate a random input/output template pair from ``generator``."""
    out_text = choice(list(generator.keys()))
    in_text = choice(generator[out_text])
    # Replace one tag occurrence at a time so repeated tags get fresh values.
    for tag in get_tags(in_text):
        sample_in, sample_out = _generate_sample(generators, tag.replace("#", ""))
        sample_out = _adjust_ending(_adjust_indentation(sample_out))
        in_text = in_text.replace(tag, sample_in, 1)
        out_text = out_text.replace(tag, sample_out, 1)
    return (in_text, out_text)
def _generate_name():
    """Random identifier: 1-3 pool words joined by underscores."""
    parts = choice(names, choice([1, 2, 3]))
    return "_".join(parts)
def _generate_string():
    """Random quoted string literal of 1-3 pool words."""
    words = " ".join(choice(names, choice([1, 2, 3])))
    quote = choice(["'", '"'])
    return f"{quote}{words}{quote}"
def _adjust_indentation(output: str):
return output.replace("\\n", "\\n\\t")
def _adjust_ending(output: str):
if len(output) > 0 and output[-1] == ':':
return output + ' pass'
return output
def save_generated(generated: List[InputOutput], path: str):
    """Write the generated pairs to ``inputs.txt`` / ``outputs.txt`` in ``path``.

    One sample per line, so samples must not contain raw newlines
    (escaped ``\\n`` sequences are fine).
    """
    inputs = [i for i, _ in generated]
    outputs = [o for _, o in generated]
    with open(os.path.join(path, 'inputs.txt'), 'w') as f:
        f.write("\n".join(inputs))
    with open(os.path.join(path, 'outputs.txt'), 'w') as f:
        f.write("\n".join(outputs)) | langcreator/generator.py | import os
import string
import nltk # type: ignore
import re
from nltk import FreqDist
from nltk.corpus import brown, stopwords # type: ignore
from typing import List
from langcreator.common import Generators, InputOutput, InputOutputGenerator, get_tags, choice, builtin_generators
# Corpora needed for the frequency list and stopword filtering; quiet=True
# suppresses download progress output.
nltk.download('brown', quiet=True)
nltk.download('stopwords', quiet=True)
# Type keywords of the generated toy language.
value_types = ['string', 'number', 'written_number', 'variable', 'list']
# Spelled-out numerals zero..ten.
numbers_written = [
    'zero', 'one', 'two', 'three', 'four', 'five', 'six', 'seven', 'eight',
    'nine', 'ten'
]
# Word frequencies over the Brown corpus; words of length <= 2 are skipped.
frequency_list = FreqDist(w.lower() for w in brown.words() if len(w) > 2)
# Words that must not be used as identifiers in generated samples.
reserved_words = set([
    "get", "if", "while", "for", "break", "continue", "end", "any", "and",
    "or", "remove", "delete", "set", "between", "same", "greater", "smaller",
    "equals", "use", "set", "let", "equals", "be", "to", "is", "do", "done",
    "exit", "break", "continue", "times", "end", "let", "remove", "update",
    "jump", "go", "each", "switch", "case", "replace", "match", "small",
    "greater", "than"
] + numbers_written + value_types)
stopwords_en = stopwords.words('english')
letters = list(string.ascii_lowercase)
# Identifier pool: common lowercase ASCII words (frequency ranks 100-300)
# that are neither reserved words nor stopwords, plus the letters a-z.
names = [
    w for w, _ in frequency_list.most_common()[100:300] if w not in
    reserved_words and w not in stopwords_en and re.match("^[a-z]+$", w)
] + letters
# Numeric literal pool 0..100.
numbers = list(range(101))
def generate_samples(generators: Generators, n: int) -> List[InputOutput]:
    """Draw ``n`` random (input, output) samples from ``generators``."""
    keys = list(generators.keys())
    samples: List[InputOutput] = []
    for _ in range(n):
        samples.append(_generate_sample(generators, choice(keys)))
    return samples
def _generate_sample(generators: Generators, key: str) -> InputOutput:
    """Generate one (input, output) pair for generator ``key``.

    Quotes are stripped from the key; builtin generators are dispatched
    directly, dict generators produce template-based samples, and list
    generators delegate recursively to a randomly chosen sub-key.

    Raises:
        Exception: if the generator is neither a dict nor a list.
    """
    key = key.replace("'", "")
    if key in builtin_generators:
        return _generate_builtin(key)
    generator = generators[key]
    # isinstance instead of `type(...) ==`: idiomatic and accepts subclasses.
    if isinstance(generator, dict):
        return _generate_input_output_sample(generators, generator)
    if isinstance(generator, list):
        return _generate_sample(generators, choice(generator))
    raise Exception(f"Invalid generator {key}")
def _generate_builtin(key: str):
    """Produce an (input, output) pair for a builtin generator key.

    Builtin values render identically on the input and output side.
    """
    if key == "int":
        value = str(choice(numbers))
    elif key == "name":
        value = _generate_name()
    elif key == "float":
        value = str(choice(numbers) / choice([10, 100]))
    elif key == "string":
        value = _generate_string()
    else:
        raise Exception(f"Builtin generator for {key} not implemented yet")
    return (value, value)
def _generate_input_output_sample(
        generators: Generators,
        generator: InputOutputGenerator) -> InputOutput:
    """Instantiate a random input/output template pair from ``generator``."""
    out_text = choice(list(generator.keys()))
    in_text = choice(generator[out_text])
    # Replace one tag occurrence at a time so repeated tags get fresh values.
    for tag in get_tags(in_text):
        sample_in, sample_out = _generate_sample(generators, tag.replace("#", ""))
        sample_out = _adjust_ending(_adjust_indentation(sample_out))
        in_text = in_text.replace(tag, sample_in, 1)
        out_text = out_text.replace(tag, sample_out, 1)
    return (in_text, out_text)
def _generate_name():
    """Random identifier: 1-3 pool words joined by underscores."""
    parts = choice(names, choice([1, 2, 3]))
    return "_".join(parts)
def _generate_string():
    """Random quoted string literal of 1-3 pool words."""
    words = " ".join(choice(names, choice([1, 2, 3])))
    quote = choice(["'", '"'])
    return f"{quote}{words}{quote}"
def _adjust_indentation(output: str):
return output.replace("\\n", "\\n\\t")
def _adjust_ending(output: str):
if len(output) > 0 and output[-1] == ':':
return output + ' pass'
return output
def save_generated(generated: List[InputOutput], path: str):
    """Write the generated pairs to ``inputs.txt`` / ``outputs.txt`` in ``path``.

    One sample per line, so samples must not contain raw newlines
    (escaped ``\\n`` sequences are fine).
    """
    inputs = [i for i, _ in generated]
    outputs = [o for _, o in generated]
    with open(os.path.join(path, 'inputs.txt'), 'w') as f:
        f.write("\n".join(inputs))
    with open(os.path.join(path, 'outputs.txt'), 'w') as f:
        f.write("\n".join(outputs)) | 0.486819 | 0.275288 |
import logging
from owslib.etree import etree
from owslib import crs, util
from owslib.namespaces import Namespaces
# Module-level logger for parse warnings.
LOGGER = logging.getLogger(__name__)
# Shared namespace registry resolving prefix -> URI.
n = Namespaces()
OWS_NAMESPACE_1_0_0 = n.get_namespace("ows")
OWS_NAMESPACE_1_1_0 = n.get_namespace("ows110")
OWS_NAMESPACE_2_0_0 = n.get_namespace("ows200")
XSI_NAMESPACE = n.get_namespace("xsi")
XLINK_NAMESPACE = n.get_namespace("xlink")
DEFAULT_OWS_NAMESPACE = OWS_NAMESPACE_1_1_0  # Use this as default for OWSCommon objects
class OwsCommon(object):
    """Holds the OWS Common version string and its matching XML namespace."""

    def __init__(self, version):
        self.version = version
        # Explicitly supported versions; anything else falls back to 2.0.0.
        known = {
            '1.0.0': OWS_NAMESPACE_1_0_0,
            '1.1.0': OWS_NAMESPACE_1_1_0,
        }
        self.namespace = known.get(version, OWS_NAMESPACE_2_0_0)
class ServiceIdentification(object):
    """Initialize an OWS Common ServiceIdentification construct.

    Extracts title, abstract, keywords, access constraints, fees, service
    type and version(s), and profiles from the given element.
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('Title', namespace))
        self.title = util.testXMLValue(val)
        val = self._root.find(util.nspath('Abstract', namespace))
        self.abstract = util.testXMLValue(val)
        self.keywords = []
        for f in self._root.findall(util.nspath('Keywords/Keyword', namespace)):
            if f.text is not None:
                self.keywords.append(f.text)
        val = self._root.find(util.nspath('AccessConstraints', namespace))
        self.accessconstraints = util.testXMLValue(val)
        val = self._root.find(util.nspath('Fees', namespace))
        self.fees = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceType', namespace))
        self.type = util.testXMLValue(val)
        self.service = self.type  # alias kept for backwards compatibility
        # First ServiceTypeVersion wins for .version; all are kept in .versions.
        val = self._root.find(util.nspath('ServiceTypeVersion', namespace))
        self.version = util.testXMLValue(val)
        self.versions = []
        for v in self._root.findall(util.nspath('ServiceTypeVersion', namespace)):
            self.versions.append(util.testXMLValue(v))
        self.profiles = []
        for p in self._root.findall(util.nspath('Profile', namespace)):
            self.profiles.append(util.testXMLValue(p))

    def __str__(self):
        return 'Service: {}, title={}'.format(self.service, self.title or '')

    def __repr__(self):
        return '<owslib.ows.ServiceIdentification {} at {}>'.format(self.service, hex(id(self)))
class ServiceProvider(object):
    """Initialize an OWS Common ServiceProvider construct.

    Exposes provider name, a nested ServiceContact, and the provider-site URL.
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.contact = ServiceContact(infoset, namespace)
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            try:
                # ProviderSite carries its URL in an xlink:href attribute.
                urlattrib = val.attrib[util.nspath('href', XLINK_NAMESPACE)]
                self.url = util.testXMLValue(urlattrib, True)
            except KeyError:
                self.url = None
        else:
            self.url = None
class ServiceContact(object):
    """Initialize an OWS Common ServiceContact construct.

    Flattens ServiceContact/ContactInfo sub-elements into plain attributes
    (role, position, phone, fax, address fields, email, url, hours, ...).
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.organization = util.testXMLValue(
            self._root.find(util.nspath('ContactPersonPrimary/ContactOrganization', namespace)))
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            self.site = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.site = None
        val = self._root.find(util.nspath('ServiceContact/Role', namespace))
        self.role = util.testXMLValue(val)
        # NOTE(review): this overwrites the ProviderName value assigned above;
        # IndividualName wins whenever present -- confirm this is intended.
        val = self._root.find(util.nspath('ServiceContact/IndividualName', namespace))
        self.name = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/PositionName', namespace))
        self.position = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Voice', namespace))
        self.phone = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Facsimile', namespace))
        self.fax = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/DeliveryPoint', namespace))
        self.address = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/City', namespace))
        self.city = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/AdministrativeArea', namespace))
        self.region = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/PostalCode', namespace))
        self.postcode = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/Country', namespace))
        self.country = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/ElectronicMailAddress', namespace))
        self.email = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/OnlineResource', namespace))
        if val is not None:
            self.url = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.url = None
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/HoursOfService', namespace))
        self.hours = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/ContactInstructions', namespace))
        self.instructions = util.testXMLValue(val)
class Constraint(object):
    """A named OWS constraint and its list of allowed values."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib.get('name')
        # Values may appear directly or nested under AllowedValues.
        direct = elem.findall(util.nspath('Value', namespace))
        nested = elem.findall(util.nspath('AllowedValues/Value', namespace))
        self.values = [node.text for node in direct + nested]

    def __repr__(self):
        if not self.values:
            return "Constraint: %s" % self.name
        return "Constraint: %s - %s" % (self.name, self.values)
class Parameter(object):
    """A named OWS parameter and its list of allowed values."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib.get('name')
        # Direct Value children first, then values nested under AllowedValues.
        self.values = [node.text
                       for path in ('Value', 'AllowedValues/Value')
                       for node in elem.findall(util.nspath(path, namespace))]

    def __repr__(self):
        if self.values:
            return "Parameter: %s - %s" % (self.name, self.values)
        return "Parameter: %s" % self.name
class OperationsMetadata(object):
    """Initialize an OWS OperationMetadata construct.

    Collects the operation's HTTP methods, parameters, and constraints.
    """

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        # NOTE(review): an element without a name leaves the instance with no
        # attributes at all -- callers must filter such elements out.
        if 'name' not in elem.attrib:  # This is not a valid element
            return
        self.name = elem.attrib['name']
        self.formatOptions = ['text/xml']
        parameters = []
        self.methods = []
        self.constraints = []
        # One method entry per HTTP verb (Get/Post) that carries an xlink:href.
        for verb in elem.findall(util.nspath('DCP/HTTP/*', namespace)):
            url = util.testXMLAttribute(verb, util.nspath('href', XLINK_NAMESPACE))
            if url is not None:
                verb_constraints = [Constraint(conts, namespace) for conts in verb.findall(
                    util.nspath('Constraint', namespace))]
                self.methods.append({'constraints': verb_constraints, 'type': util.xmltag_split(verb.tag), 'url': url})
        for parameter in elem.findall(util.nspath('Parameter', namespace)):
            # OWS 1.1.0 nests values under AllowedValues; other versions inline them.
            if namespace == OWS_NAMESPACE_1_1_0:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
                    util.nspath('AllowedValues/Value', namespace))]}))
            else:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
                    util.nspath('Value', namespace))]}))
        self.parameters = dict(parameters)
        for constraint in elem.findall(util.nspath('Constraint', namespace)):
            self.constraints.append(Constraint(constraint, namespace))

    def __str__(self):
        return "Operation: {}, format={}".format(self.name, self.formatOptions)

    def __repr__(self):
        return '<owslib.ows.OperationsMetadata {} at {}>'.format(self.name, hex(id(self)))
class BoundingBox(object):
    """Initialize an OWS BoundingBox construct.

    Parses crs/dimensions attributes and LowerCorner/UpperCorner elements
    into minx/miny/maxx/maxy, honouring the CRS axis order. The CRS must be
    parsed before the corners so the axis-order swap can be applied.
    """

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.minx = None
        self.miny = None
        self.maxx = None
        self.maxy = None
        self.crs = None
        self.dimensions = 2
        if elem is None:
            return
        # The crs attribute may be plain or namespace-qualified.
        val = elem.attrib.get('crs') or elem.attrib.get('{{{}}}crs'.format(namespace))
        if val:
            try:
                self.crs = crs.Crs(val)
            except (AttributeError, ValueError):
                LOGGER.warning('Invalid CRS %r. Expected integer' % val)
        else:
            self.crs = None
        val = elem.attrib.get('dimensions') or elem.attrib.get('{{{}}}dimensions'.format(namespace))
        if val is not None:
            self.dimensions = int(util.testXMLValue(val, True))
        else:  # assume 2
            self.dimensions = 2
        val = elem.find(util.nspath('LowerCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                # Swap coordinates when the CRS declares y/x axis order.
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.minx, self.miny = xy[1], xy[0]
                else:
                    self.minx, self.miny = xy[0], xy[1]
        val = elem.find(util.nspath('UpperCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.maxx, self.maxy = xy[1], xy[0]
                else:
                    self.maxx, self.maxy = xy[0], xy[1]
class WGS84BoundingBox(BoundingBox):
    """WGS84 bbox; axis order is always x/y and dimensions fixed at 2."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        super(WGS84BoundingBox, self).__init__(elem, namespace)
        # Force CRS and dimensionality regardless of what the element declared.
        self.crs = crs.Crs('urn:ogc:def:crs:OGC:2:84')
        self.dimensions = 2
class ExceptionReport(Exception):
    """OWS ExceptionReport: parses service exceptions into ``.exceptions``.

    Exposes the first exception's code/locator/text as .code/.locator/.msg
    and the serialized report as .xml.
    """

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.exceptions = []
        # Accept either an Element or an ElementTree.
        if hasattr(elem, 'getroot'):
            elem = elem.getroot()
        for i in elem.findall(util.nspath('Exception', namespace)):
            tmp = {}
            val = i.attrib.get('exceptionCode')
            tmp['exceptionCode'] = util.testXMLValue(val, True)
            val = i.attrib.get('locator')
            tmp['locator'] = util.testXMLValue(val, True)
            val = i.find(util.nspath('ExceptionText', namespace))
            tmp['ExceptionText'] = util.testXMLValue(val)
            self.exceptions.append(tmp)
        # Set topmost exception as the summary message.
        # NOTE(review): raises IndexError when the report contains no
        # Exception elements -- confirm callers never pass an empty report.
        self.code = self.exceptions[0]['exceptionCode']
        self.locator = self.exceptions[0]['locator']
        self.msg = self.exceptions[0]['ExceptionText']
        self.xml = etree.tostring(elem)

    def __str__(self):
        return repr(self.msg) | owslib/ows.py | import logging
from owslib.etree import etree
from owslib import crs, util
from owslib.namespaces import Namespaces
# Module-level logger for parse warnings.
LOGGER = logging.getLogger(__name__)
# Shared namespace registry resolving prefix -> URI.
n = Namespaces()
OWS_NAMESPACE_1_0_0 = n.get_namespace("ows")
OWS_NAMESPACE_1_1_0 = n.get_namespace("ows110")
OWS_NAMESPACE_2_0_0 = n.get_namespace("ows200")
XSI_NAMESPACE = n.get_namespace("xsi")
XLINK_NAMESPACE = n.get_namespace("xlink")
DEFAULT_OWS_NAMESPACE = OWS_NAMESPACE_1_1_0  # Use this as default for OWSCommon objects
class OwsCommon(object):
    """Holds the OWS Common version string and its matching XML namespace."""

    def __init__(self, version):
        self.version = version
        # Explicitly supported versions; anything else falls back to 2.0.0.
        known = {
            '1.0.0': OWS_NAMESPACE_1_0_0,
            '1.1.0': OWS_NAMESPACE_1_1_0,
        }
        self.namespace = known.get(version, OWS_NAMESPACE_2_0_0)
class ServiceIdentification(object):
    """Initialize an OWS Common ServiceIdentification construct.

    Extracts title, abstract, keywords, access constraints, fees, service
    type and version(s), and profiles from the given element.
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('Title', namespace))
        self.title = util.testXMLValue(val)
        val = self._root.find(util.nspath('Abstract', namespace))
        self.abstract = util.testXMLValue(val)
        self.keywords = []
        for f in self._root.findall(util.nspath('Keywords/Keyword', namespace)):
            if f.text is not None:
                self.keywords.append(f.text)
        val = self._root.find(util.nspath('AccessConstraints', namespace))
        self.accessconstraints = util.testXMLValue(val)
        val = self._root.find(util.nspath('Fees', namespace))
        self.fees = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceType', namespace))
        self.type = util.testXMLValue(val)
        self.service = self.type  # alias kept for backwards compatibility
        # First ServiceTypeVersion wins for .version; all are kept in .versions.
        val = self._root.find(util.nspath('ServiceTypeVersion', namespace))
        self.version = util.testXMLValue(val)
        self.versions = []
        for v in self._root.findall(util.nspath('ServiceTypeVersion', namespace)):
            self.versions.append(util.testXMLValue(v))
        self.profiles = []
        for p in self._root.findall(util.nspath('Profile', namespace)):
            self.profiles.append(util.testXMLValue(p))

    def __str__(self):
        return 'Service: {}, title={}'.format(self.service, self.title or '')

    def __repr__(self):
        return '<owslib.ows.ServiceIdentification {} at {}>'.format(self.service, hex(id(self)))
class ServiceProvider(object):
    """Initialize an OWS Common ServiceProvider construct.

    Exposes provider name, a nested ServiceContact, and the provider-site URL.
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.contact = ServiceContact(infoset, namespace)
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            try:
                # ProviderSite carries its URL in an xlink:href attribute.
                urlattrib = val.attrib[util.nspath('href', XLINK_NAMESPACE)]
                self.url = util.testXMLValue(urlattrib, True)
            except KeyError:
                self.url = None
        else:
            self.url = None
class ServiceContact(object):
    """Initialize an OWS Common ServiceContact construct.

    Flattens ServiceContact/ContactInfo sub-elements into plain attributes
    (role, position, phone, fax, address fields, email, url, hours, ...).
    """

    def __init__(self, infoset, namespace=DEFAULT_OWS_NAMESPACE):
        self._root = infoset
        val = self._root.find(util.nspath('ProviderName', namespace))
        self.name = util.testXMLValue(val)
        self.organization = util.testXMLValue(
            self._root.find(util.nspath('ContactPersonPrimary/ContactOrganization', namespace)))
        val = self._root.find(util.nspath('ProviderSite', namespace))
        if val is not None:
            self.site = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.site = None
        val = self._root.find(util.nspath('ServiceContact/Role', namespace))
        self.role = util.testXMLValue(val)
        # NOTE(review): this overwrites the ProviderName value assigned above;
        # IndividualName wins whenever present -- confirm this is intended.
        val = self._root.find(util.nspath('ServiceContact/IndividualName', namespace))
        self.name = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/PositionName', namespace))
        self.position = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Voice', namespace))
        self.phone = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Phone/Facsimile', namespace))
        self.fax = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/DeliveryPoint', namespace))
        self.address = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/City', namespace))
        self.city = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/AdministrativeArea', namespace))
        self.region = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/PostalCode', namespace))
        self.postcode = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/Country', namespace))
        self.country = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/Address/ElectronicMailAddress', namespace))
        self.email = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/OnlineResource', namespace))
        if val is not None:
            self.url = util.testXMLValue(val.attrib.get(util.nspath('href', XLINK_NAMESPACE)), True)
        else:
            self.url = None
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/HoursOfService', namespace))
        self.hours = util.testXMLValue(val)
        val = self._root.find(util.nspath('ServiceContact/ContactInfo/ContactInstructions', namespace))
        self.instructions = util.testXMLValue(val)
class Constraint(object):
    """A named OWS constraint and its list of allowed values."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib.get('name')
        # Values may appear directly or nested under AllowedValues.
        direct = elem.findall(util.nspath('Value', namespace))
        nested = elem.findall(util.nspath('AllowedValues/Value', namespace))
        self.values = [node.text for node in direct + nested]

    def __repr__(self):
        if not self.values:
            return "Constraint: %s" % self.name
        return "Constraint: %s - %s" % (self.name, self.values)
class Parameter(object):
    """A named OWS parameter and its list of allowed values."""

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.name = elem.attrib.get('name')
        # Direct Value children first, then values nested under AllowedValues.
        self.values = [node.text
                       for path in ('Value', 'AllowedValues/Value')
                       for node in elem.findall(util.nspath(path, namespace))]

    def __repr__(self):
        if self.values:
            return "Parameter: %s - %s" % (self.name, self.values)
        return "Parameter: %s" % self.name
class OperationsMetadata(object):
    """Initialize an OWS OperationMetadata construct.

    Collects the operation's HTTP methods, parameters, and constraints.
    """

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        # NOTE(review): an element without a name leaves the instance with no
        # attributes at all -- callers must filter such elements out.
        if 'name' not in elem.attrib:  # This is not a valid element
            return
        self.name = elem.attrib['name']
        self.formatOptions = ['text/xml']
        parameters = []
        self.methods = []
        self.constraints = []
        # One method entry per HTTP verb (Get/Post) that carries an xlink:href.
        for verb in elem.findall(util.nspath('DCP/HTTP/*', namespace)):
            url = util.testXMLAttribute(verb, util.nspath('href', XLINK_NAMESPACE))
            if url is not None:
                verb_constraints = [Constraint(conts, namespace) for conts in verb.findall(
                    util.nspath('Constraint', namespace))]
                self.methods.append({'constraints': verb_constraints, 'type': util.xmltag_split(verb.tag), 'url': url})
        for parameter in elem.findall(util.nspath('Parameter', namespace)):
            # OWS 1.1.0 nests values under AllowedValues; other versions inline them.
            if namespace == OWS_NAMESPACE_1_1_0:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
                    util.nspath('AllowedValues/Value', namespace))]}))
            else:
                parameters.append((parameter.attrib['name'], {'values': [i.text for i in parameter.findall(
                    util.nspath('Value', namespace))]}))
        self.parameters = dict(parameters)
        for constraint in elem.findall(util.nspath('Constraint', namespace)):
            self.constraints.append(Constraint(constraint, namespace))

    def __str__(self):
        return "Operation: {}, format={}".format(self.name, self.formatOptions)

    def __repr__(self):
        return '<owslib.ows.OperationsMetadata {} at {}>'.format(self.name, hex(id(self)))
class BoundingBox(object):
    """Initialize an OWS BoundingBox construct.

    Parses crs/dimensions attributes and LowerCorner/UpperCorner elements
    into minx/miny/maxx/maxy, honouring the CRS axis order. The CRS must be
    parsed before the corners so the axis-order swap can be applied.
    """

    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.minx = None
        self.miny = None
        self.maxx = None
        self.maxy = None
        self.crs = None
        self.dimensions = 2
        if elem is None:
            return
        # The crs attribute may be plain or namespace-qualified.
        val = elem.attrib.get('crs') or elem.attrib.get('{{{}}}crs'.format(namespace))
        if val:
            try:
                self.crs = crs.Crs(val)
            except (AttributeError, ValueError):
                LOGGER.warning('Invalid CRS %r. Expected integer' % val)
        else:
            self.crs = None
        val = elem.attrib.get('dimensions') or elem.attrib.get('{{{}}}dimensions'.format(namespace))
        if val is not None:
            self.dimensions = int(util.testXMLValue(val, True))
        else:  # assume 2
            self.dimensions = 2
        val = elem.find(util.nspath('LowerCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                # Swap coordinates when the CRS declares y/x axis order.
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.minx, self.miny = xy[1], xy[0]
                else:
                    self.minx, self.miny = xy[0], xy[1]
        val = elem.find(util.nspath('UpperCorner', namespace))
        tmp = util.testXMLValue(val)
        if tmp is not None:
            xy = tmp.split()
            if len(xy) > 1:
                if self.crs is not None and self.crs.axisorder == 'yx':
                    self.maxx, self.maxy = xy[1], xy[0]
                else:
                    self.maxx, self.maxy = xy[0], xy[1]
class WGS84BoundingBox(BoundingBox):
    """A BoundingBox pinned to WGS84 (CRS84 URN, x/y axis order, 2-D)."""
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        super(WGS84BoundingBox, self).__init__(elem, namespace)
        # Override whatever the element declared: this type is 2-D WGS84.
        self.dimensions = 2
        self.crs = crs.Crs('urn:ogc:def:crs:OGC:2:84')
class ExceptionReport(Exception):
    """OWS ExceptionReport.

    Parses an ows:ExceptionReport document into a list of dicts (one per
    ows:Exception element) and exposes the first entry's code, locator and
    text as attributes for convenient error reporting.
    """
    def __init__(self, elem, namespace=DEFAULT_OWS_NAMESPACE):
        self.exceptions = []
        # Accept either an Element or an ElementTree.
        if hasattr(elem, 'getroot'):
            elem = elem.getroot()
        for i in elem.findall(util.nspath('Exception', namespace)):
            tmp = {}
            val = i.attrib.get('exceptionCode')
            tmp['exceptionCode'] = util.testXMLValue(val, True)
            val = i.attrib.get('locator')
            tmp['locator'] = util.testXMLValue(val, True)
            val = i.find(util.nspath('ExceptionText', namespace))
            tmp['ExceptionText'] = util.testXMLValue(val)
            self.exceptions.append(tmp)
        # Use the topmost exception as the summary message. Guard against a
        # report containing no Exception elements (e.g. a namespace
        # mismatch), which previously raised an IndexError here and masked
        # the actual service error.
        if self.exceptions:
            self.code = self.exceptions[0]['exceptionCode']
            self.locator = self.exceptions[0]['locator']
            self.msg = self.exceptions[0]['ExceptionText']
        else:
            self.code = None
            self.locator = None
            self.msg = None
        self.xml = etree.tostring(elem)
    def __str__(self):
        return repr(self.msg)
from nose.tools import eq_
import mkt
import mkt.site.tests
from mkt.site.utils import app_factory, version_factory
from mkt.reviewers import helpers
class TestGetPosition(mkt.site.tests.TestCase):
    """Tests for helpers.get_position (review-queue position/ETA estimates).

    NOTE(review): the fixture apps created in setUp drive the expected
    averages asserted below; changing their timestamps or statuses changes
    the arithmetic the assertions rely on.
    """
    def setUp(self):
        # Add a public, reviewed app for measure. It took 4 days for this app
        # to get reviewed.
        self.public_app = app_factory(
            version_kw={'created': self.days_ago(7),
                        'nomination': self.days_ago(7),
                        'reviewed': self.days_ago(3)})
        # Took 8 days for another public app to get reviewed.
        app_factory(
            version_kw={'nomination': self.days_ago(10),
                        'reviewed': self.days_ago(2)})
        # Add to the queue 2 pending apps for good measure.
        app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(3)})
        app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(1)})
        # A deleted app that shouldn't change calculations.
        app_factory(
            status=mkt.STATUS_DELETED,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(1)})
    def test_min(self):
        # An app that has waited far longer than the average review time
        # should still report at least 1 day remaining.
        pending_app = app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(42)})
        pos = helpers.get_position(pending_app)
        eq_(pos['days'], 1)
    def test_packaged_app(self):
        # A packaged app with a pending update: position is computed from
        # the latest (pending) version, not the public one.
        self.public_app.update(is_packaged=True)
        version = version_factory(
            addon=self.public_app, file_kw={'status': mkt.STATUS_PENDING})
        self.public_app.reload()
        eq_(self.public_app.latest_version, version)
        self._test_position(self.public_app)
    def test_pending_app(self):
        pending_app = app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING})
        self._test_position(pending_app)
    def _test_position(self, app):
        # Shared assertions for an app whose nomination is 2 days old.
        app.latest_version.update(nomination=self.days_ago(2))
        pos = helpers.get_position(app)
        # We set the nomination to 2 days ago.
        eq_(pos['days_in_queue'], 2)
        # There are three pending apps.
        eq_(pos['total'], 3)
        # It took 12 days for 2 apps to get reviewed, giving us an average of
        # 6 days to go from pending->public, but we've already waited 2 days.
        eq_(pos['days'], 4)
        # There is one pending app in front of us.
        eq_(pos['pos'], 2)
import mkt
import mkt.site.tests
from mkt.site.utils import app_factory, version_factory
from mkt.reviewers import helpers
class TestGetPosition(mkt.site.tests.TestCase):
    """Tests for helpers.get_position (review-queue position/ETA estimates).

    NOTE(review): the fixture apps created in setUp drive the expected
    averages asserted below; changing their timestamps or statuses changes
    the arithmetic the assertions rely on.
    """
    def setUp(self):
        # Add a public, reviewed app for measure. It took 4 days for this app
        # to get reviewed.
        self.public_app = app_factory(
            version_kw={'created': self.days_ago(7),
                        'nomination': self.days_ago(7),
                        'reviewed': self.days_ago(3)})
        # Took 8 days for another public app to get reviewed.
        app_factory(
            version_kw={'nomination': self.days_ago(10),
                        'reviewed': self.days_ago(2)})
        # Add to the queue 2 pending apps for good measure.
        app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(3)})
        app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(1)})
        # A deleted app that shouldn't change calculations.
        app_factory(
            status=mkt.STATUS_DELETED,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(1)})
    def test_min(self):
        # An app that has waited far longer than the average review time
        # should still report at least 1 day remaining.
        pending_app = app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING},
            version_kw={'nomination': self.days_ago(42)})
        pos = helpers.get_position(pending_app)
        eq_(pos['days'], 1)
    def test_packaged_app(self):
        # A packaged app with a pending update: position is computed from
        # the latest (pending) version, not the public one.
        self.public_app.update(is_packaged=True)
        version = version_factory(
            addon=self.public_app, file_kw={'status': mkt.STATUS_PENDING})
        self.public_app.reload()
        eq_(self.public_app.latest_version, version)
        self._test_position(self.public_app)
    def test_pending_app(self):
        pending_app = app_factory(
            status=mkt.STATUS_PENDING,
            file_kw={'status': mkt.STATUS_PENDING})
        self._test_position(pending_app)
    def _test_position(self, app):
        # Shared assertions for an app whose nomination is 2 days old.
        app.latest_version.update(nomination=self.days_ago(2))
        pos = helpers.get_position(app)
        # We set the nomination to 2 days ago.
        eq_(pos['days_in_queue'], 2)
        # There are three pending apps.
        eq_(pos['total'], 3)
        # It took 12 days for 2 apps to get reviewed, giving us an average of
        # 6 days to go from pending->public, but we've already waited 2 days.
        eq_(pos['days'], 4)
        # There is one pending app in front of us.
        eq_(pos['pos'], 2)
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ann import ANN
# UCI wine-quality dataset endpoints (semicolon-delimited CSV, fetched over
# the network by WineDataReader._read_data).
red_data_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
white_data_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv"
class WineDataReader():
    """Read the red/white wine CSV files and build train/dev/test tensors.

    The two CSVs are concatenated with a binary ``label`` column
    (red = 1, white = 0) and split 80%/10%/10%.
    """
    def __init__(self, red_data_path: str, white_data_path: str) -> None:
        self.red_data_path = red_data_path
        self.white_data_path = white_data_path
    def _read_data(self) -> tuple:
        """
        Read the data from the CSV files (semicolon-delimited).
        :return: (red, white) DataFrames
        """
        red = pd.read_csv(self.red_data_path, sep=";")
        white = pd.read_csv(self.white_data_path, sep=";")
        return red, white
    def _create_label(self, red: pd.DataFrame, white: pd.DataFrame) -> pd.DataFrame:
        """
        Label red wines 1 and white wines 0 and concatenate the frames.
        (Return annotation fixed: this returns a single DataFrame, not a tuple.)
        :param red: red data
        :param white: white data
        :return: combined, labeled dataset
        """
        red["label"] = 1
        white["label"] = 0
        dataset = pd.concat([red, white], ignore_index=True)
        return dataset
    def _split_data(self, dataset: pd.DataFrame) -> tuple:
        """
        Shuffle and split the dataset into training, develop and test set
        with an 80%/10%/10% ratio.
        :param dataset: dataset
        :return: training, develop and test set
        """
        train, dev, test = np.split(
            dataset.sample(frac=1), [int(0.8 * len(dataset)), int(0.9 * len(dataset))]
        )
        return train, dev, test
    @staticmethod
    def _to_tensors(frame: pd.DataFrame) -> tuple:
        """
        Convert one split into (features, labels) float32 tensors.
        Extracted helper: the same conversion was previously written out
        three times in create_dataset.
        """
        x = torch.from_numpy(frame.drop(["label"], axis=1).values).float()
        y = torch.from_numpy(frame["label"].values.reshape(-1, 1)).float()
        return x, y
    def create_dataset(self) -> tuple:
        """
        Create the input and target tensors for the neural network.
        :return: (x_train, y_train, x_val, y_val, x_test, y_test)
        """
        red, white = self._read_data()
        dataset = self._create_label(red, white)
        train, dev, test = self._split_data(dataset)
        x_train, y_train = self._to_tensors(train)
        x_val, y_val = self._to_tensors(dev)
        x_test, y_test = self._to_tensors(test)
        return x_train, y_train, x_val, y_val, x_test, y_test
# Create the ANN architecture with validation set.
class ANNClassifier(ANN):
    """
    ANN classifier trained with a held-out validation set.

    NOTE(review): ``train`` shadows ``nn.Module.train(mode=True)``; callers
    needing the base method must invoke ``nn.Module.train`` explicitly.
    """
    def __init__(
        self, input_size: int, output_size: int, hidden_size: int = 64
    ) -> None:
        """
        Initialize the neural network.
        :param input_size: input size
        :param output_size: output size
        :param hidden_size: hidden size
        """
        super().__init__(input_size, output_size, hidden_size)
    # Override the train method to use validation set.
    def train(
        self,
        training_set: tuple,
        validation_set: tuple,
        batch_size: int = 32,
        epochs: int = 1000,
        lr: float = 0.05,
        criterion: nn.Module = None,
    ) -> None:
        """
        Train the neural network and validate the model with the validation set.
        :param training_set: (inputs, targets) tensors
        :param validation_set: (inputs, targets) tensors
        :param batch_size: batch size
        :param epochs: number of epochs
        :param lr: learning rate
        :param criterion: loss function; defaults to ``nn.CrossEntropyLoss()``
        """
        # Fix: the default loss used to be instantiated at function-definition
        # time (mutable default-argument anti-pattern); create it per call.
        if criterion is None:
            criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(self.parameters(), lr=lr)
        for epoch in range(epochs):
            for i in range(0, len(training_set[0]), batch_size):
                # Calculate the training loss for this mini-batch.
                y_train_pred = self.forward(training_set[0][i : i + batch_size])
                train_loss = criterion(
                    y_train_pred, training_set[1][i : i + batch_size]
                )
                # Update the weights and biases.
                optimizer.zero_grad()
                train_loss.backward()
                optimizer.step()
            # Print the training loss and validation loss each 100 epochs.
            if epoch % 100 == 0:
                # Validation needs no autograd graph.
                with torch.no_grad():
                    y_val_pred = self.forward(validation_set[0])
                    val_loss = criterion(y_val_pred, validation_set[1])
                print(
                    f"Epoch: {epoch}, Train Loss: {train_loss.item()}, Validation Loss: {val_loss.item()}"
                )
    def predict(self, x: torch.Tensor) -> torch.Tensor:
        """
        Predict the label of the test set (raw forward-pass outputs).
        """
        return super().forward(x)
    def evaluate(self, x: torch.Tensor, y: torch.Tensor) -> float:
        """
        Evaluate the model with the test set.
        :param x: test set
        :param y: test label
        :return: accuracy (0.5 decision threshold)
        """
        # Inference only: skip gradient tracking.
        with torch.no_grad():
            y_pred = (self.predict(x) > 0.5).float()
        return (y_pred == y).sum().item() / len(y)
if __name__ == "__main__":
    # Calculate the accuracy of the model on the test set.
    x_train, y_train, x_val, y_val, x_test, y_test = WineDataReader(
        red_data_path, white_data_path
    ).create_dataset()
    # input_size=12: every column except "label" is a feature (presumably 11
    # physico-chemical measurements plus the quality score — TODO confirm).
    model = ANNClassifier(input_size=12, output_size=1, hidden_size=64)
    # NOTE(review): this calls ANNClassifier.train (which shadows
    # nn.Module.train); MSELoss overrides the CrossEntropyLoss default.
    model.train(
        training_set=(x_train, y_train),
        validation_set=(x_val, y_val),
        batch_size=128,
        epochs=1000,
        lr=0.05,
        criterion=nn.MSELoss(),
    )
    y_pred = model.predict(x_test)
    # Convert the prediction to 0 and 1.
    y_pred = (y_pred > 0.5).float()
    print(f"Test accuracy: {(y_pred == y_test).sum().item() / len(y_test)}")
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from ann import ANN
# UCI wine-quality dataset endpoints (semicolon-delimited CSV, fetched over
# the network by WineDataReader._read_data).
red_data_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv"
white_data_path = "https://archive.ics.uci.edu/ml/machine-learning-databases/wine-quality/winequality-white.csv"
class WineDataReader():
    """Read the red/white wine CSV files and build train/dev/test tensors.

    The two CSVs are concatenated with a binary ``label`` column
    (red = 1, white = 0) and split 80%/10%/10%.
    """
    def __init__(self, red_data_path: str, white_data_path: str) -> None:
        self.red_data_path = red_data_path
        self.white_data_path = white_data_path
    def _read_data(self) -> tuple:
        """
        Read the data from the CSV files (semicolon-delimited).
        :return: (red, white) DataFrames
        """
        red = pd.read_csv(self.red_data_path, sep=";")
        white = pd.read_csv(self.white_data_path, sep=";")
        return red, white
    def _create_label(self, red: pd.DataFrame, white: pd.DataFrame) -> pd.DataFrame:
        """
        Label red wines 1 and white wines 0 and concatenate the frames.
        (Return annotation fixed: this returns a single DataFrame, not a tuple.)
        :param red: red data
        :param white: white data
        :return: combined, labeled dataset
        """
        red["label"] = 1
        white["label"] = 0
        dataset = pd.concat([red, white], ignore_index=True)
        return dataset
    def _split_data(self, dataset: pd.DataFrame) -> tuple:
        """
        Shuffle and split the dataset into training, develop and test set
        with an 80%/10%/10% ratio.
        :param dataset: dataset
        :return: training, develop and test set
        """
        train, dev, test = np.split(
            dataset.sample(frac=1), [int(0.8 * len(dataset)), int(0.9 * len(dataset))]
        )
        return train, dev, test
    @staticmethod
    def _to_tensors(frame: pd.DataFrame) -> tuple:
        """
        Convert one split into (features, labels) float32 tensors.
        Extracted helper: the same conversion was previously written out
        three times in create_dataset.
        """
        x = torch.from_numpy(frame.drop(["label"], axis=1).values).float()
        y = torch.from_numpy(frame["label"].values.reshape(-1, 1)).float()
        return x, y
    def create_dataset(self) -> tuple:
        """
        Create the input and target tensors for the neural network.
        :return: (x_train, y_train, x_val, y_val, x_test, y_test)
        """
        red, white = self._read_data()
        dataset = self._create_label(red, white)
        train, dev, test = self._split_data(dataset)
        x_train, y_train = self._to_tensors(train)
        x_val, y_val = self._to_tensors(dev)
        x_test, y_test = self._to_tensors(test)
        return x_train, y_train, x_val, y_val, x_test, y_test
# Create the ANN architecture with validation set.
class ANNClassifier(ANN):
    """
    ANN classifier trained with a held-out validation set.

    NOTE(review): ``train`` shadows ``nn.Module.train(mode=True)``; callers
    needing the base method must invoke ``nn.Module.train`` explicitly.
    """
    def __init__(
        self, input_size: int, output_size: int, hidden_size: int = 64
    ) -> None:
        """
        Initialize the neural network.
        :param input_size: input size
        :param output_size: output size
        :param hidden_size: hidden size
        """
        super().__init__(input_size, output_size, hidden_size)
    # Override the train method to use validation set.
    def train(
        self,
        training_set: tuple,
        validation_set: tuple,
        batch_size: int = 32,
        epochs: int = 1000,
        lr: float = 0.05,
        criterion: nn.Module = None,
    ) -> None:
        """
        Train the neural network and validate the model with the validation set.
        :param training_set: (inputs, targets) tensors
        :param validation_set: (inputs, targets) tensors
        :param batch_size: batch size
        :param epochs: number of epochs
        :param lr: learning rate
        :param criterion: loss function; defaults to ``nn.CrossEntropyLoss()``
        """
        # Fix: the default loss used to be instantiated at function-definition
        # time (mutable default-argument anti-pattern); create it per call.
        if criterion is None:
            criterion = nn.CrossEntropyLoss()
        optimizer = optim.SGD(self.parameters(), lr=lr)
        for epoch in range(epochs):
            for i in range(0, len(training_set[0]), batch_size):
                # Calculate the training loss for this mini-batch.
                y_train_pred = self.forward(training_set[0][i : i + batch_size])
                train_loss = criterion(
                    y_train_pred, training_set[1][i : i + batch_size]
                )
                # Update the weights and biases.
                optimizer.zero_grad()
                train_loss.backward()
                optimizer.step()
            # Print the training loss and validation loss each 100 epochs.
            if epoch % 100 == 0:
                # Validation needs no autograd graph.
                with torch.no_grad():
                    y_val_pred = self.forward(validation_set[0])
                    val_loss = criterion(y_val_pred, validation_set[1])
                print(
                    f"Epoch: {epoch}, Train Loss: {train_loss.item()}, Validation Loss: {val_loss.item()}"
                )
    def predict(self, x: torch.Tensor) -> torch.Tensor:
        """
        Predict the label of the test set (raw forward-pass outputs).
        """
        return super().forward(x)
    def evaluate(self, x: torch.Tensor, y: torch.Tensor) -> float:
        """
        Evaluate the model with the test set.
        :param x: test set
        :param y: test label
        :return: accuracy (0.5 decision threshold)
        """
        # Inference only: skip gradient tracking.
        with torch.no_grad():
            y_pred = (self.predict(x) > 0.5).float()
        return (y_pred == y).sum().item() / len(y)
if __name__ == "__main__":
    # Calculate the accuracy of the model on the test set.
    x_train, y_train, x_val, y_val, x_test, y_test = WineDataReader(
        red_data_path, white_data_path
    ).create_dataset()
    # input_size=12: every column except "label" is a feature (presumably 11
    # physico-chemical measurements plus the quality score — TODO confirm).
    model = ANNClassifier(input_size=12, output_size=1, hidden_size=64)
    # NOTE(review): this calls ANNClassifier.train (which shadows
    # nn.Module.train); MSELoss overrides the CrossEntropyLoss default.
    model.train(
        training_set=(x_train, y_train),
        validation_set=(x_val, y_val),
        batch_size=128,
        epochs=1000,
        lr=0.05,
        criterion=nn.MSELoss(),
    )
    y_pred = model.predict(x_test)
    # Convert the prediction to 0 and 1.
    y_pred = (y_pred > 0.5).float()
    print(f"Test accuracy: {(y_pred == y_test).sum().item() / len(y_test)}")
import tensorflow as tf
# Per-channel means subtracted in vgg_preprocess (look like the standard
# VGG/ImageNet training means — confirm against the served model).
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
# Bounds for the smallest image side in the aspect-preserving resize
# (_RESIZE_SIDE_MAX is unused in this file chunk).
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.
  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.
  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.
  Returns:
    the cropped (and resized) image.
  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)
  # Graph-time check: the static shape may be unknown, so rank is validated
  # with tf.Assert rather than a Python-level check.
  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  # The control dependency forces the rank assertion to run before the shape
  # is consumed.
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])
  # Offsets may arrive as float tensors; the cast truncates them to int32.
  offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  # Reshape re-asserts the (dynamic) cropped shape for downstream ops.
  return tf.reshape(image, cropped_shape)
def _central_crop(image_list, crop_height, crop_width):
  """Performs central crops of the given image list.
  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the height of the image following the crop.
    crop_width: the width of the image following the crop.
  Returns:
    the list of cropped images.
  """
  outputs = []
  for image in image_list:
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    # Fix: use integer floor division — offsets are pixel indices. The old
    # true division produced float tensors that _crop then truncated via a
    # cast; for valid (non-negative) offsets the result is identical.
    offset_height = (image_height - crop_height) // 2
    offset_width = (image_width - crop_width) // 2
    outputs.append(_crop(image, offset_height, offset_width,
                         crop_height, crop_width))
  return outputs
def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.
  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)
  Note that the rank of `image` must be known.
  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
  Returns:
    the centered image.
  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')
  # Split into per-channel slabs, center each one, and stitch them back.
  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  centered = [channel - mean for channel, mean in zip(channels, means)]
  return tf.concat(axis=2, values=centered)
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.
  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  # tf.image.resize works on batches, so add a leading batch dim and strip
  # it afterwards.
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize(image, [new_height, new_width])
  # Fix: squeeze only the batch axis. An unqualified squeeze removes ALL
  # size-1 dimensions, so a 1-pixel-wide result would lose a spatial dim.
  resized_image = tf.squeeze(resized_image, axis=[0])
  resized_image.set_shape([None, None, 3])
  return resized_image
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.
  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  side = tf.cast(tf.convert_to_tensor(smallest_side, dtype=tf.int32), tf.float32)
  h = tf.cast(height, tf.float32)
  w = tf.cast(width, tf.float32)
  # Scale so the shorter of the two sides lands exactly on `smallest_side`;
  # the longer side follows, preserving the aspect ratio.
  scale = tf.cond(tf.greater(h, w),
                  lambda: side / w,
                  lambda: side / h)
  new_height = tf.cast(tf.math.rint(h * scale), tf.int32)
  new_width = tf.cast(tf.math.rint(w * scale), tf.int32)
  return new_height, new_width
def inception_preprocess(image,
                         height,
                         width,
                         central_fraction=0.875,
                         scope=None,
                         central_crop=False):
  """Prepare one image for evaluation.
  If height and width are specified it would output an image with that size by
  applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
  input image.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is largest positive representable number for
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
    central_crop: Enable central cropping of images during preprocessing for
      evaluation.
  Returns:
    3-D float Tensor of prepared image.
  """
  if image.dtype != tf.float32:
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  # Fix: honor the `central_crop` flag. The crop used to run unconditionally,
  # silently ignoring the parameter documented above.
  if central_crop and central_fraction:
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=central_fraction)
  if height and width:
    # Resize the image to the specified height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize(image, [height, width])
    image = tf.squeeze(image, [0])
  # Rescale from [0, 1] to [-1, 1].
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)
  return image
def vgg_preprocess(image, output_height, output_width):
  """VGG-style evaluation preprocessing.

  Aspect-preserving resize of the smallest side to _RESIZE_SIDE_MIN, central
  crop to (output_height, output_width), then per-channel mean subtraction.
  """
  image = _aspect_preserving_resize(image, _RESIZE_SIDE_MIN)
  image = _central_crop([image], output_height, output_width)[0]
  image.set_shape([output_height, output_width, 3])
  image = tf.cast(image, tf.float32)
  return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
# Per-channel means subtracted in vgg_preprocess (look like the standard
# VGG/ImageNet training means — confirm against the served model).
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
# Bounds for the smallest image side in the aspect-preserving resize
# (_RESIZE_SIDE_MAX is unused in this file chunk).
_RESIZE_SIDE_MIN = 256
_RESIZE_SIDE_MAX = 512
def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.
  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.
  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.
  Returns:
    the cropped (and resized) image.
  Raises:
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)
  # Graph-time check: the static shape may be unknown, so rank is validated
  # with tf.Assert rather than a Python-level check.
  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  # The control dependency forces the rank assertion to run before the shape
  # is consumed.
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])
  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])
  # Offsets may arrive as float tensors; the cast truncates them to int32.
  offsets = tf.cast(tf.stack([offset_height, offset_width, 0]), tf.int32)
  # Use tf.slice instead of crop_to_bounding box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  # Reshape re-asserts the (dynamic) cropped shape for downstream ops.
  return tf.reshape(image, cropped_shape)
def _central_crop(image_list, crop_height, crop_width):
  """Performs central crops of the given image list.
  Args:
    image_list: a list of image tensors of the same dimension but possibly
      varying channel.
    crop_height: the height of the image following the crop.
    crop_width: the width of the image following the crop.
  Returns:
    the list of cropped images.
  """
  outputs = []
  for image in image_list:
    image_height = tf.shape(image)[0]
    image_width = tf.shape(image)[1]
    # Fix: use integer floor division — offsets are pixel indices. The old
    # true division produced float tensors that _crop then truncated via a
    # cast; for valid (non-negative) offsets the result is identical.
    offset_height = (image_height - crop_height) // 2
    offset_width = (image_width - crop_width) // 2
    outputs.append(_crop(image, offset_height, offset_width,
                         crop_height, crop_width))
  return outputs
def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.
  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)
  Note that the rank of `image` must be known.
  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.
  Returns:
    the centered image.
  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')
  # Split into per-channel slabs, center each one, and stitch them back.
  channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
  centered = [channel - mean for channel, mean in zip(channels, means)]
  return tf.concat(axis=2, values=centered)
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.
  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)
  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  # tf.image.resize works on batches, so add a leading batch dim and strip
  # it afterwards.
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize(image, [new_height, new_width])
  # Fix: squeeze only the batch axis. An unqualified squeeze removes ALL
  # size-1 dimensions, so a 1-pixel-wide result would lose a spatial dim.
  resized_image = tf.squeeze(resized_image, axis=[0])
  resized_image.set_shape([None, None, 3])
  return resized_image
def _smallest_size_at_least(height, width, smallest_side):
  """Computes new shape with the smallest side equal to `smallest_side`.
  Computes new shape with the smallest side equal to `smallest_side` while
  preserving the original aspect ratio.
  Args:
    height: an int32 scalar tensor indicating the current height.
    width: an int32 scalar tensor indicating the current width.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.
  Returns:
    new_height: an int32 scalar tensor indicating the new height.
    new_width: an int32 scalar tensor indicating the new width.
  """
  side = tf.cast(tf.convert_to_tensor(smallest_side, dtype=tf.int32), tf.float32)
  h = tf.cast(height, tf.float32)
  w = tf.cast(width, tf.float32)
  # Scale so the shorter of the two sides lands exactly on `smallest_side`;
  # the longer side follows, preserving the aspect ratio.
  scale = tf.cond(tf.greater(h, w),
                  lambda: side / w,
                  lambda: side / h)
  new_height = tf.cast(tf.math.rint(h * scale), tf.int32)
  new_width = tf.cast(tf.math.rint(w * scale), tf.int32)
  return new_height, new_width
def inception_preprocess(image,
                         height,
                         width,
                         central_fraction=0.875,
                         scope=None,
                         central_crop=False):
  """Prepare one image for evaluation.
  If height and width are specified it would output an image with that size by
  applying resize_bilinear.
  If central_fraction is specified it would crop the central fraction of the
  input image.
  Args:
    image: 3-D Tensor of image. If dtype is tf.float32 then the range should be
      [0, 1], otherwise it would converted to tf.float32 assuming that the range
      is [0, MAX], where MAX is largest positive representable number for
      int(8/16/32) data type (see `tf.image.convert_image_dtype` for details).
    height: integer
    width: integer
    central_fraction: Optional Float, fraction of the image to crop.
    scope: Optional scope for name_scope.
    central_crop: Enable central cropping of images during preprocessing for
      evaluation.
  Returns:
    3-D float Tensor of prepared image.
  """
  if image.dtype != tf.float32:
    image = tf.image.convert_image_dtype(image, dtype=tf.float32)
  # Fix: honor the `central_crop` flag. The crop used to run unconditionally,
  # silently ignoring the parameter documented above.
  if central_crop and central_fraction:
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=central_fraction)
  if height and width:
    # Resize the image to the specified height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize(image, [height, width])
    image = tf.squeeze(image, [0])
  # Rescale from [0, 1] to [-1, 1].
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)
  return image
def vgg_preprocess(image, output_height, output_width):
  """VGG-style evaluation preprocessing.

  Aspect-preserving resize of the smallest side to _RESIZE_SIDE_MIN, central
  crop to (output_height, output_width), then per-channel mean subtraction.
  """
  image = _aspect_preserving_resize(image, _RESIZE_SIDE_MIN)
  image = _central_crop([image], output_height, output_width)[0]
  image.set_shape([output_height, output_width, 3])
  image = tf.cast(image, tf.float32)
  return _mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN])
from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
# Python stdlib imports
from datetime import (
time,
datetime,
date,
timedelta,
)
# 3rd party imports
import pytest
# package imports
from openpyxl.comments import Comment
from openpyxl.cell.cell import ERROR_CODES
@pytest.fixture
def DummyWorksheet():
    """Minimal worksheet stub exposing only what Cell touches: a parent
    workbook with empty style registries and an epoch, an encoding, a title,
    a comment counter and a cell() factory."""
    from openpyxl.utils.indexed_list import IndexedList
    from openpyxl.utils.datetime import CALENDAR_WINDOWS_1900
    from openpyxl.cell import Cell
    class Wb(object):
        # Windows 1900 date system; styles start out as empty registries.
        epoch = CALENDAR_WINDOWS_1900
        _fonts = IndexedList()
        _fills = IndexedList()
        _borders = IndexedList()
        _protections = IndexedList()
        _alignments = IndexedList()
        _number_formats = IndexedList()
        _cell_styles = IndexedList()
    class Ws(object):
        encoding = 'utf-8'
        parent = Wb()
        title = "Dummy Worksheet"
        _comment_count = 0
        def cell(self, column, row):
            return Cell(self, row=row, column=column)
    return Ws()
@pytest.fixture
def Cell():
    """The Cell class under test."""
    from ..cell import Cell
    return Cell
@pytest.fixture
def dummy_cell(DummyWorksheet, Cell):
    """An A1 cell attached to the dummy worksheet."""
    return Cell(DummyWorksheet, column=1, row=1)
def test_ctor(dummy_cell):
    # A freshly constructed cell is numeric ('n'), empty and uncommented.
    cell = dummy_cell
    assert cell.data_type == 'n'
    assert cell.column == 1
    assert cell.row == 1
    assert cell.coordinate == "A1"
    assert cell.value is None
    assert cell.comment is None
@pytest.mark.parametrize("datatype", ['n', 'd', 's', 'b', 'f', 'e'])
def test_null(dummy_cell, datatype):
    # Assigning None resets any data type back to numeric ('n').
    cell = dummy_cell
    cell.data_type = datatype
    assert cell.data_type == datatype
    cell.value = None
    assert cell.data_type == 'n'
@pytest.mark.parametrize("value", ['hello', ".", '0800'])
def test_string(dummy_cell, value):
    # Each parametrized value must be stored as a string ('s'). Fix: the
    # literal 'hello' was assigned instead of `value`, so "." and '0800'
    # were never actually exercised.
    cell = dummy_cell
    cell.value = value
    assert cell.data_type == 's'
@pytest.mark.parametrize("value", ['=42', '=if(A1<4;-1;1)'])
def test_formula(dummy_cell, value):
    """Strings starting with '=' are stored as formulae ('f')."""
    dummy_cell.value = value
    assert dummy_cell.data_type == 'f'
def test_not_formula(dummy_cell):
    # A bare "=" is not a formula; it stays an ordinary string.
    dummy_cell.value = "="
    assert dummy_cell.data_type == 's'
    assert dummy_cell.value == "="
@pytest.mark.parametrize("value", [True, False])
def test_boolean(dummy_cell, value):
    """Booleans are stored with the boolean data type ('b')."""
    dummy_cell.value = value
    assert dummy_cell.data_type == 'b'
@pytest.mark.parametrize("error_string", ERROR_CODES)
def test_error_codes(dummy_cell, error_string):
    """Every recognised Excel error string gets the 'e' data type."""
    dummy_cell.value = error_string
    assert dummy_cell.data_type == 'e'
@pytest.mark.parametrize("value, number_format",
                         [
                             (
                                 datetime(2010, 7, 13, 6, 37, 41),
                                 "yyyy-mm-dd h:mm:ss"
                             ),
                             (
                                 date(2010, 7, 13),
                                 "yyyy-mm-dd"
                             ),
                             (
                                 time(1, 3),
                                 "h:mm:ss",
                             )
                         ]
                         )
def test_insert_date(dummy_cell, value, number_format):
    # Date/time values switch the cell to the 'd' type and select a default
    # number format matching the value's precision.
    cell = dummy_cell
    cell.value = value
    assert cell.data_type == 'd'
    assert cell.is_date
    assert cell.number_format == number_format
@pytest.mark.pandas_required
def test_timstamp(dummy_cell):
    # pandas Timestamps are treated like datetimes and get the full format.
    # NOTE(review): function name has a typo ("timstamp"); kept as-is since
    # pytest selects tests by name.
    from pandas import Timestamp
    cell = dummy_cell
    cell.value = Timestamp("2018-09-05")
    assert cell.number_format == "yyyy-mm-dd h:mm:ss"
def test_not_overwrite_time_format(dummy_cell):
    # A user-set number format must survive a subsequent date assignment.
    cell = dummy_cell
    cell.number_format = "mmm-yy"
    cell.value = date(2010, 7, 13)
    assert cell.number_format == "mmm-yy"
@pytest.mark.parametrize("value, is_date",
                         [
                             (None, True,),
                             ("testme", False),
                             (True, False),
                         ]
                         )
def test_cell_formatted_as_date(dummy_cell, value, is_date):
    # After holding a date, the cell keeps its date formatting when cleared
    # with None, but not when overwritten by a string or boolean.
    cell = dummy_cell
    cell.value = datetime.today()
    cell.value = value
    assert cell.is_date == is_date
    assert cell.value == value
def test_set_bad_type(dummy_cell):
cell = dummy_cell
with pytest.raises(ValueError):
cell.set_explicit_value(1, 'q')
def test_illegal_characters(dummy_cell):
from openpyxl.utils.exceptions import IllegalCharacterError
from openpyxl.compat import range
from itertools import chain
cell = dummy_cell
# The bytes 0x00 through 0x1F inclusive must be manually escaped in values.
illegal_chrs = chain(range(9), range(11, 13), range(14, 32))
for i in illegal_chrs:
with pytest.raises(IllegalCharacterError):
cell.value = chr(i)
with pytest.raises(IllegalCharacterError):
cell.value = "A {0} B".format(chr(i))
cell.value = chr(33)
cell.value = chr(9) # Tab
cell.value = chr(10) # Newline
cell.value = chr(13) # Carriage return
cell.value = " Leading and trailing spaces are legal "
@pytest.mark.xfail
def test_timedelta(dummy_cell):
cell = dummy_cell
cell.value = timedelta(days=1, hours=3)
assert cell.value == 1.125
assert cell.data_type == 'n'
assert cell.is_date is False
assert cell.number_format == "[hh]:mm:ss"
def test_repr(dummy_cell):
cell = dummy_cell
assert repr(cell) == "<Cell 'Dummy Worksheet'.A1>"
def test_repr_object(dummy_cell):
class Dummy:
def __str__(self):
return "something"
cell = dummy_cell
try:
cell._bind_value(Dummy())
except ValueError as err:
assert "something" not in str(err)
def test_comment_assignment(dummy_cell):
assert dummy_cell.comment is None
comm = Comment("text", "author")
dummy_cell.comment = comm
assert dummy_cell.comment == comm
def test_only_one_cell_per_comment(dummy_cell):
ws = dummy_cell.parent
comm = Comment('text', 'author')
dummy_cell.comment = comm
c2 = ws.cell(column=1, row=2)
c2.comment = comm
assert c2.comment.parent is c2
def test_remove_comment(dummy_cell):
comm = Comment('text', 'author')
dummy_cell.comment = comm
dummy_cell.comment = None
assert dummy_cell.comment is None
def test_cell_offset(dummy_cell):
cell = dummy_cell
assert cell.offset(2, 1).coordinate == 'B3'
class TestEncoding:
try:
# Python 2
pound = unichr(163)
except NameError:
# Python 3
pound = chr(163)
test_string = ('Compound Value (' + pound + ')').encode('latin1')
def test_bad_encoding(self):
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
cell = ws['A1']
with pytest.raises(UnicodeDecodeError):
cell.check_string(self.test_string)
with pytest.raises(UnicodeDecodeError):
cell.value = self.test_string
def test_good_encoding(self):
from openpyxl import Workbook
wb = Workbook()
wb.encoding = 'latin1'
ws = wb.active
cell = ws['A1']
cell.value = self.test_string
def test_font(DummyWorksheet, Cell):
from openpyxl.styles import Font
font = Font(bold=True)
ws = DummyWorksheet
ws.parent._fonts.add(font)
cell = Cell(ws, row=1, column=1)
assert cell.font == font
def test_fill(DummyWorksheet, Cell):
from openpyxl.styles import PatternFill
fill = PatternFill(patternType="solid", fgColor="FF0000")
ws = DummyWorksheet
ws.parent._fills.add(fill)
cell = Cell(ws, column='A', row=1)
assert cell.fill == fill
def test_border(DummyWorksheet, Cell):
from openpyxl.styles import Border
border = Border()
ws = DummyWorksheet
ws.parent._borders.add(border)
cell = Cell(ws, column='A', row=1)
assert cell.border == border
def test_number_format(DummyWorksheet, Cell):
ws = DummyWorksheet
ws.parent._number_formats.add("dd--hh--mm")
cell = Cell(ws, column="A", row=1)
cell.number_format = "dd--hh--mm"
assert cell.number_format == "dd--hh--mm"
def test_alignment(DummyWorksheet, Cell):
from openpyxl.styles import Alignment
align = Alignment(wrapText=True)
ws = DummyWorksheet
ws.parent._alignments.add(align)
cell = Cell(ws, column="A", row=1)
assert cell.alignment == align
def test_protection(DummyWorksheet, Cell):
from openpyxl.styles import Protection
prot = Protection(locked=False)
ws = DummyWorksheet
ws.parent._protections.add(prot)
cell = Cell(ws, column="A", row=1)
assert cell.protection == prot
def test_pivot_button(DummyWorksheet, Cell):
ws = DummyWorksheet
cell = Cell(ws, column="A", row=1)
cell.style_id
cell._style.pivotButton = 1
assert cell.pivotButton is True
def test_quote_prefix(DummyWorksheet, Cell):
ws = DummyWorksheet
cell = Cell(ws, column="A", row=1)
cell.style_id
cell._style.quotePrefix = 1
assert cell.quotePrefix is True
def test_remove_hyperlink(dummy_cell):
"""Remove a cell hyperlink"""
cell = dummy_cell
cell.hyperlink = "http://test.com"
cell.hyperlink = None
assert cell.hyperlink is None
@pytest.fixture
def MergedCell():
from ..cell import MergedCell
return MergedCell(DummyWorksheet())
class TestMergedCell:
def test_value(self, MergedCell):
cell = MergedCell
assert cell._value is None
def test_data_type(self, MergedCell):
cell = MergedCell
assert cell.data_type == 'n'
def test_comment(self, MergedCell):
cell = MergedCell
assert cell.comment is None
def test_coordinate(self, MergedCell):
cell = MergedCell
cell.row = 1
cell.column = 1
assert cell.coordinate == "A1"
def test_repr(self, MergedCell):
cell = MergedCell
cell.row = 1
cell.column = 1
assert repr(cell) == "<MergedCell 'Dummy Worksheet'.A1>"
def test_hyperlink(self, MergedCell):
cell = MergedCell
assert cell.hyperlink is None | openpyxl/cell/tests/test_cell.py | from __future__ import absolute_import
# Copyright (c) 2010-2018 openpyxl
# Python stdlib imports
from datetime import (
time,
datetime,
date,
timedelta,
)
# 3rd party imports
import pytest
# package imports
from openpyxl.comments import Comment
from openpyxl.cell.cell import ERROR_CODES
@pytest.fixture
def DummyWorksheet():
from openpyxl.utils.indexed_list import IndexedList
from openpyxl.utils.datetime import CALENDAR_WINDOWS_1900
from openpyxl.cell import Cell
class Wb(object):
epoch = CALENDAR_WINDOWS_1900
_fonts = IndexedList()
_fills = IndexedList()
_borders = IndexedList()
_protections = IndexedList()
_alignments = IndexedList()
_number_formats = IndexedList()
_cell_styles = IndexedList()
class Ws(object):
encoding = 'utf-8'
parent = Wb()
title = "Dummy Worksheet"
_comment_count = 0
def cell(self, column, row):
return Cell(self, row=row, column=column)
return Ws()
@pytest.fixture
def Cell():
from ..cell import Cell
return Cell
@pytest.fixture
def dummy_cell(DummyWorksheet, Cell):
ws = DummyWorksheet
cell = Cell(ws, column=1, row=1)
return cell
def test_ctor(dummy_cell):
cell = dummy_cell
assert cell.data_type == 'n'
assert cell.column == 1
assert cell.row == 1
assert cell.coordinate == "A1"
assert cell.value is None
assert cell.comment is None
@pytest.mark.parametrize("datatype", ['n', 'd', 's', 'b', 'f', 'e'])
def test_null(dummy_cell, datatype):
cell = dummy_cell
cell.data_type = datatype
assert cell.data_type == datatype
cell.value = None
assert cell.data_type == 'n'
@pytest.mark.parametrize("value", ['hello', ".", '0800'])
def test_string(dummy_cell, value):
cell = dummy_cell
cell.value = 'hello'
assert cell.data_type == 's'
@pytest.mark.parametrize("value", ['=42', '=if(A1<4;-1;1)'])
def test_formula(dummy_cell, value):
cell = dummy_cell
cell.value = value
assert cell.data_type == 'f'
def test_not_formula(dummy_cell):
dummy_cell.value = "="
assert dummy_cell.data_type == 's'
assert dummy_cell.value == "="
@pytest.mark.parametrize("value", [True, False])
def test_boolean(dummy_cell, value):
cell = dummy_cell
cell.value = value
assert cell.data_type == 'b'
@pytest.mark.parametrize("error_string", ERROR_CODES)
def test_error_codes(dummy_cell, error_string):
cell = dummy_cell
cell.value = error_string
assert cell.data_type == 'e'
@pytest.mark.parametrize("value, number_format",
[
(
datetime(2010, 7, 13, 6, 37, 41),
"yyyy-mm-dd h:mm:ss"
),
(
date(2010, 7, 13),
"yyyy-mm-dd"
),
(
time(1, 3),
"h:mm:ss",
)
]
)
def test_insert_date(dummy_cell, value, number_format):
cell = dummy_cell
cell.value = value
assert cell.data_type == 'd'
assert cell.is_date
assert cell.number_format == number_format
@pytest.mark.pandas_required
def test_timstamp(dummy_cell):
from pandas import Timestamp
cell = dummy_cell
cell.value = Timestamp("2018-09-05")
assert cell.number_format == "yyyy-mm-dd h:mm:ss"
def test_not_overwrite_time_format(dummy_cell):
cell = dummy_cell
cell.number_format = "mmm-yy"
cell.value = date(2010, 7, 13)
assert cell.number_format == "mmm-yy"
@pytest.mark.parametrize("value, is_date",
[
(None, True,),
("testme", False),
(True, False),
]
)
def test_cell_formatted_as_date(dummy_cell, value, is_date):
cell = dummy_cell
cell.value = datetime.today()
cell.value = value
assert cell.is_date == is_date
assert cell.value == value
def test_set_bad_type(dummy_cell):
cell = dummy_cell
with pytest.raises(ValueError):
cell.set_explicit_value(1, 'q')
def test_illegal_characters(dummy_cell):
from openpyxl.utils.exceptions import IllegalCharacterError
from openpyxl.compat import range
from itertools import chain
cell = dummy_cell
# The bytes 0x00 through 0x1F inclusive must be manually escaped in values.
illegal_chrs = chain(range(9), range(11, 13), range(14, 32))
for i in illegal_chrs:
with pytest.raises(IllegalCharacterError):
cell.value = chr(i)
with pytest.raises(IllegalCharacterError):
cell.value = "A {0} B".format(chr(i))
cell.value = chr(33)
cell.value = chr(9) # Tab
cell.value = chr(10) # Newline
cell.value = chr(13) # Carriage return
cell.value = " Leading and trailing spaces are legal "
@pytest.mark.xfail
def test_timedelta(dummy_cell):
cell = dummy_cell
cell.value = timedelta(days=1, hours=3)
assert cell.value == 1.125
assert cell.data_type == 'n'
assert cell.is_date is False
assert cell.number_format == "[hh]:mm:ss"
def test_repr(dummy_cell):
cell = dummy_cell
assert repr(cell) == "<Cell 'Dummy Worksheet'.A1>"
def test_repr_object(dummy_cell):
class Dummy:
def __str__(self):
return "something"
cell = dummy_cell
try:
cell._bind_value(Dummy())
except ValueError as err:
assert "something" not in str(err)
def test_comment_assignment(dummy_cell):
assert dummy_cell.comment is None
comm = Comment("text", "author")
dummy_cell.comment = comm
assert dummy_cell.comment == comm
def test_only_one_cell_per_comment(dummy_cell):
ws = dummy_cell.parent
comm = Comment('text', 'author')
dummy_cell.comment = comm
c2 = ws.cell(column=1, row=2)
c2.comment = comm
assert c2.comment.parent is c2
def test_remove_comment(dummy_cell):
comm = Comment('text', 'author')
dummy_cell.comment = comm
dummy_cell.comment = None
assert dummy_cell.comment is None
def test_cell_offset(dummy_cell):
cell = dummy_cell
assert cell.offset(2, 1).coordinate == 'B3'
class TestEncoding:
try:
# Python 2
pound = unichr(163)
except NameError:
# Python 3
pound = chr(163)
test_string = ('Compound Value (' + pound + ')').encode('latin1')
def test_bad_encoding(self):
from openpyxl import Workbook
wb = Workbook()
ws = wb.active
cell = ws['A1']
with pytest.raises(UnicodeDecodeError):
cell.check_string(self.test_string)
with pytest.raises(UnicodeDecodeError):
cell.value = self.test_string
def test_good_encoding(self):
from openpyxl import Workbook
wb = Workbook()
wb.encoding = 'latin1'
ws = wb.active
cell = ws['A1']
cell.value = self.test_string
def test_font(DummyWorksheet, Cell):
from openpyxl.styles import Font
font = Font(bold=True)
ws = DummyWorksheet
ws.parent._fonts.add(font)
cell = Cell(ws, row=1, column=1)
assert cell.font == font
def test_fill(DummyWorksheet, Cell):
from openpyxl.styles import PatternFill
fill = PatternFill(patternType="solid", fgColor="FF0000")
ws = DummyWorksheet
ws.parent._fills.add(fill)
cell = Cell(ws, column='A', row=1)
assert cell.fill == fill
def test_border(DummyWorksheet, Cell):
from openpyxl.styles import Border
border = Border()
ws = DummyWorksheet
ws.parent._borders.add(border)
cell = Cell(ws, column='A', row=1)
assert cell.border == border
def test_number_format(DummyWorksheet, Cell):
ws = DummyWorksheet
ws.parent._number_formats.add("dd--hh--mm")
cell = Cell(ws, column="A", row=1)
cell.number_format = "dd--hh--mm"
assert cell.number_format == "dd--hh--mm"
def test_alignment(DummyWorksheet, Cell):
from openpyxl.styles import Alignment
align = Alignment(wrapText=True)
ws = DummyWorksheet
ws.parent._alignments.add(align)
cell = Cell(ws, column="A", row=1)
assert cell.alignment == align
def test_protection(DummyWorksheet, Cell):
from openpyxl.styles import Protection
prot = Protection(locked=False)
ws = DummyWorksheet
ws.parent._protections.add(prot)
cell = Cell(ws, column="A", row=1)
assert cell.protection == prot
def test_pivot_button(DummyWorksheet, Cell):
ws = DummyWorksheet
cell = Cell(ws, column="A", row=1)
cell.style_id
cell._style.pivotButton = 1
assert cell.pivotButton is True
def test_quote_prefix(DummyWorksheet, Cell):
ws = DummyWorksheet
cell = Cell(ws, column="A", row=1)
cell.style_id
cell._style.quotePrefix = 1
assert cell.quotePrefix is True
def test_remove_hyperlink(dummy_cell):
"""Remove a cell hyperlink"""
cell = dummy_cell
cell.hyperlink = "http://test.com"
cell.hyperlink = None
assert cell.hyperlink is None
@pytest.fixture
def MergedCell():
from ..cell import MergedCell
return MergedCell(DummyWorksheet())
class TestMergedCell:
def test_value(self, MergedCell):
cell = MergedCell
assert cell._value is None
def test_data_type(self, MergedCell):
cell = MergedCell
assert cell.data_type == 'n'
def test_comment(self, MergedCell):
cell = MergedCell
assert cell.comment is None
def test_coordinate(self, MergedCell):
cell = MergedCell
cell.row = 1
cell.column = 1
assert cell.coordinate == "A1"
def test_repr(self, MergedCell):
cell = MergedCell
cell.row = 1
cell.column = 1
assert repr(cell) == "<MergedCell 'Dummy Worksheet'.A1>"
def test_hyperlink(self, MergedCell):
cell = MergedCell
assert cell.hyperlink is None | 0.564819 | 0.486514 |
from .tool.func import *
import random
def randomkeyy():
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
password = ""
for i in range(6):
index = random.randrange(len(alphabet))
password = password + alphabet[index]
return password
def login_need_email_2(conn, tool):
curs = conn.cursor()
if flask.request.method == 'POST':
re_set_list = ['c_id', 'c_pw', 'c_ans', 'c_que', 'c_key', 'c_type']
if tool == 'email_change':
flask.session['c_key'] = randomkeyy()
flask.session['c_id'] = ip_check()
flask.session['c_type'] = 'email_change'
elif tool == 'pass_find':
user_id = flask.request.form.get('id', '')
user_email = flask.request.form.get('email', '')
flask.session['c_key'] = randomkeyy()
flask.session['c_id'] = user_id
flask.session['c_type'] = 'pass_find'
else:
if not 'c_type' in flask.session:
return redirect('/register')
if tool != 'pass_find':
user_email = flask.request.form.get('email', '')
email_data = re.search(r'@([^@]+)$', user_email)
if email_data:
curs.execute(db_change("select html from html_filter where html = ? and kind = 'email'"), [email_data.group(1)])
if not curs.fetchall():
for i in re_set_list:
flask.session.pop(i, None)
return redirect('/email_filter')
else:
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/36')
curs.execute(db_change('select data from other where name = "email_title"'))
sql_d = curs.fetchall()
t_text = html.escape(sql_d[0][0]) if sql_d and sql_d[0][0] != '' else (wiki_set()[0] + ' ์ธ์ฆ')
curs.execute(db_change('select data from other where name = "email_text"'))
sql_d = curs.fetchall()
i_text = (html.escape(sql_d[0][0]) + '\n\n๋ํ์ํค ์ธ์ฆ ์ฝ๋ : ' + flask.session['c_key']) if sql_d and sql_d[0][0] != '' else ('๋ํ์ํค ์ธ์ฆ ์ฝ๋: ' + flask.session['c_key'])
if tool == 'pass_find':
curs.execute(db_change("select id from user_set where id = ? and name = 'email' and data = ?"), [user_id, user_email])
if not curs.fetchall():
return re_error('/error/12')
if send_email(user_email, t_text, i_text) == 0:
return re_error('/error/18')
return redirect('/check_key')
else:
curs.execute(db_change('select id from user_set where name = "email" and data = ?'), [user_email])
if curs.fetchall():
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/35')
if send_email(user_email, t_text, i_text) == 0:
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/18')
flask.session['c_email'] = user_email
return redirect('/check_key')
else:
if tool == 'pass_find':
curs.execute(db_change('select data from other where name = "password_search_text"'))
sql_d = curs.fetchall()
b_text = (sql_d[0][0] + '<hr class="main_hr">') if sql_d and sql_d[0][0] != '' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('password_search'), wiki_set(), custom(), other2([0, 0])],
data = b_text + '''
<form method="post">
<input placeholder="''' + load_lang('id') + '''" name="id" type="text">
<hr class="main_hr">
<input placeholder="''' + load_lang('email') + '''" name="email" type="text">
<hr class="main_hr">
<button type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['user', load_lang('return')]]
))
else:
if tool == 'need_email' and not 'c_type' in flask.session:
return redirect('/register')
curs.execute(db_change('select data from other where name = "email_insert_text"'))
sql_d = curs.fetchall()
b_text = (sql_d[0][0] + '<hr class="main_hr">') if sql_d and sql_d[0][0] != '' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('email'), wiki_set(), custom(), other2([0, 0])],
data = '''
<a href="/email_filter">(''' + load_lang('email_filter_list') + ''')</a>
<hr class="main_hr">
''' + b_text + '''
<form method="post">
<input placeholder="''' + load_lang('email') + '''" name="email" type="text">
<hr class="main_hr">
<button type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['user', load_lang('return')]]
)) | route/login_need_email.py | from .tool.func import *
import random
def randomkeyy():
alphabet = "abcdefghijklmnopqrstuvwxyz0123456789"
password = ""
for i in range(6):
index = random.randrange(len(alphabet))
password = password + alphabet[index]
return password
def login_need_email_2(conn, tool):
curs = conn.cursor()
if flask.request.method == 'POST':
re_set_list = ['c_id', 'c_pw', 'c_ans', 'c_que', 'c_key', 'c_type']
if tool == 'email_change':
flask.session['c_key'] = randomkeyy()
flask.session['c_id'] = ip_check()
flask.session['c_type'] = 'email_change'
elif tool == 'pass_find':
user_id = flask.request.form.get('id', '')
user_email = flask.request.form.get('email', '')
flask.session['c_key'] = randomkeyy()
flask.session['c_id'] = user_id
flask.session['c_type'] = 'pass_find'
else:
if not 'c_type' in flask.session:
return redirect('/register')
if tool != 'pass_find':
user_email = flask.request.form.get('email', '')
email_data = re.search(r'@([^@]+)$', user_email)
if email_data:
curs.execute(db_change("select html from html_filter where html = ? and kind = 'email'"), [email_data.group(1)])
if not curs.fetchall():
for i in re_set_list:
flask.session.pop(i, None)
return redirect('/email_filter')
else:
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/36')
curs.execute(db_change('select data from other where name = "email_title"'))
sql_d = curs.fetchall()
t_text = html.escape(sql_d[0][0]) if sql_d and sql_d[0][0] != '' else (wiki_set()[0] + ' ์ธ์ฆ')
curs.execute(db_change('select data from other where name = "email_text"'))
sql_d = curs.fetchall()
i_text = (html.escape(sql_d[0][0]) + '\n\n๋ํ์ํค ์ธ์ฆ ์ฝ๋ : ' + flask.session['c_key']) if sql_d and sql_d[0][0] != '' else ('๋ํ์ํค ์ธ์ฆ ์ฝ๋: ' + flask.session['c_key'])
if tool == 'pass_find':
curs.execute(db_change("select id from user_set where id = ? and name = 'email' and data = ?"), [user_id, user_email])
if not curs.fetchall():
return re_error('/error/12')
if send_email(user_email, t_text, i_text) == 0:
return re_error('/error/18')
return redirect('/check_key')
else:
curs.execute(db_change('select id from user_set where name = "email" and data = ?'), [user_email])
if curs.fetchall():
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/35')
if send_email(user_email, t_text, i_text) == 0:
for i in re_set_list:
flask.session.pop(i, None)
return re_error('/error/18')
flask.session['c_email'] = user_email
return redirect('/check_key')
else:
if tool == 'pass_find':
curs.execute(db_change('select data from other where name = "password_search_text"'))
sql_d = curs.fetchall()
b_text = (sql_d[0][0] + '<hr class="main_hr">') if sql_d and sql_d[0][0] != '' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('password_search'), wiki_set(), custom(), other2([0, 0])],
data = b_text + '''
<form method="post">
<input placeholder="''' + load_lang('id') + '''" name="id" type="text">
<hr class="main_hr">
<input placeholder="''' + load_lang('email') + '''" name="email" type="text">
<hr class="main_hr">
<button type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['user', load_lang('return')]]
))
else:
if tool == 'need_email' and not 'c_type' in flask.session:
return redirect('/register')
curs.execute(db_change('select data from other where name = "email_insert_text"'))
sql_d = curs.fetchall()
b_text = (sql_d[0][0] + '<hr class="main_hr">') if sql_d and sql_d[0][0] != '' else ''
return easy_minify(flask.render_template(skin_check(),
imp = [load_lang('email'), wiki_set(), custom(), other2([0, 0])],
data = '''
<a href="/email_filter">(''' + load_lang('email_filter_list') + ''')</a>
<hr class="main_hr">
''' + b_text + '''
<form method="post">
<input placeholder="''' + load_lang('email') + '''" name="email" type="text">
<hr class="main_hr">
<button type="submit">''' + load_lang('save') + '''</button>
</form>
''',
menu = [['user', load_lang('return')]]
)) | 0.176743 | 0.07521 |
# coding: utf-8
from elasticsearch import Elasticsearch
class ElasticSearchClient(object):
def __init__(self):
self._es_client = None
def init_app(self, app):
if 'ES_HOST' in app.config and 'ES_PORT' in app.config:
self._es_client = Elasticsearch([
{
'host': app.config['ES_HOST'],
'port': app.config['ES_PORT']
}])
def search(self, *args, **kwargs):
return self._es_client.search(*args, **kwargs)
def query_log(self, index, keyword, pod_name, start_time, end_time,
match_phrase=None):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [{
'query_string': {
'query': keyword,
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}] if keyword else []
match_phrase_list = [
match_phrase if match_phrase else
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
def query_data_join_metrics(self, job_name, num_buckets):
STAT_AGG = {
"JOINED": {
"filter": {
"term": {
"joined": True
}
}
},
"FAKE": {
"filter": {
"term": {
"fake": True
}
}
},
"TOTAL": {
"filter": {
"term": {
"fake": False
}
}
},
"UNJOINED": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]"
},
"script": "params.TOTAL - params.JOINED"
}
},
"JOIN_RATE": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]",
"FAKE": "FAKE[_count]"
},
"script": "params.JOINED / (params.TOTAL + params.FAKE)"
}
}
}
query = {
"size": 0,
"query": {
"bool": {
"must": [
{"term": {"application_id": job_name}}
]
}
},
"aggs": {
"OVERALL": {
"terms": {
"field": "application_id"
},
"aggs": STAT_AGG
},
"EVENT_TIME": {
"auto_date_histogram": {
"field": "event_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": STAT_AGG
},
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "process_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"MAX_EVENT_TIME": {
"max": {
"field": "event_time",
"format": "strict_date_optional_time"
}
},
"MIN_EVENT_TIME": {
"min": {
"field": "event_time",
"format": "strict_date_optional_time"
}
}
}
}
}
}
return es.search(index='data_join*', body=query)
def query_nn_metrics(self, job_name, num_buckets):
query = {
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"tags.application_id.keyword": job_name
}
}
]
}
},
"aggs": {
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "date_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"AUC": {
"filter": {
"term": {"name": "auc"}
},
"aggs": {
"AUC": {
"avg": {
"field": "value"
}
}
}
},
}
}
}
}
return es.search(index='metrics*', body=query)
def query_events(self, index, keyword, pod_name,
start_time, end_time):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [
{
'query_string': {
'query': f'{keyword} AND Event',
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}
] if keyword else []
match_phrase_list = [
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
es = ElasticSearchClient() | web_console_v2/api/fedlearner_webconsole/utils/es.py |
# coding: utf-8
from elasticsearch import Elasticsearch
class ElasticSearchClient(object):
def __init__(self):
self._es_client = None
def init_app(self, app):
if 'ES_HOST' in app.config and 'ES_PORT' in app.config:
self._es_client = Elasticsearch([
{
'host': app.config['ES_HOST'],
'port': app.config['ES_PORT']
}])
def search(self, *args, **kwargs):
return self._es_client.search(*args, **kwargs)
def query_log(self, index, keyword, pod_name, start_time, end_time,
match_phrase=None):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [{
'query_string': {
'query': keyword,
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}] if keyword else []
match_phrase_list = [
match_phrase if match_phrase else
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
def query_data_join_metrics(self, job_name, num_buckets):
STAT_AGG = {
"JOINED": {
"filter": {
"term": {
"joined": True
}
}
},
"FAKE": {
"filter": {
"term": {
"fake": True
}
}
},
"TOTAL": {
"filter": {
"term": {
"fake": False
}
}
},
"UNJOINED": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]"
},
"script": "params.TOTAL - params.JOINED"
}
},
"JOIN_RATE": {
"bucket_script": {
"buckets_path": {
"JOINED": "JOINED[_count]",
"TOTAL": "TOTAL[_count]",
"FAKE": "FAKE[_count]"
},
"script": "params.JOINED / (params.TOTAL + params.FAKE)"
}
}
}
query = {
"size": 0,
"query": {
"bool": {
"must": [
{"term": {"application_id": job_name}}
]
}
},
"aggs": {
"OVERALL": {
"terms": {
"field": "application_id"
},
"aggs": STAT_AGG
},
"EVENT_TIME": {
"auto_date_histogram": {
"field": "event_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": STAT_AGG
},
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "process_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"MAX_EVENT_TIME": {
"max": {
"field": "event_time",
"format": "strict_date_optional_time"
}
},
"MIN_EVENT_TIME": {
"min": {
"field": "event_time",
"format": "strict_date_optional_time"
}
}
}
}
}
}
return es.search(index='data_join*', body=query)
def query_nn_metrics(self, job_name, num_buckets):
query = {
"size": 0,
"query": {
"bool": {
"must": [
{
"term": {
"tags.application_id.keyword": job_name
}
}
]
}
},
"aggs": {
"PROCESS_TIME": {
"auto_date_histogram": {
"field": "date_time",
"format": "strict_date_optional_time",
"buckets": num_buckets
},
"aggs": {
"AUC": {
"filter": {
"term": {"name": "auc"}
},
"aggs": {
"AUC": {
"avg": {
"field": "value"
}
}
}
},
}
}
}
}
return es.search(index='metrics*', body=query)
def query_events(self, index, keyword, pod_name,
start_time, end_time):
query_body = {
'version': True,
'size': 8000,
'sort': [
{'@timestamp': 'desc'},
{
'log.offset': {
'order': 'desc',
'unmapped_type': 'long'
}
}
],
'_source': ['message'],
'query': {
'bool': {
'must': []
}
}
}
keyword_list = [
{
'query_string': {
'query': f'{keyword} AND Event',
'analyze_wildcard': True,
'default_operator': 'AND',
'default_field': '*'
}
}
] if keyword else []
match_phrase_list = [
{
'prefix': {
'kubernetes.pod.name': pod_name
}
},
{
'range': {
'@timestamp': {
'gte': start_time,
'lte': end_time,
'format': 'epoch_millis'
}
}
}
]
query_body['query']['bool']['must'] = keyword_list + match_phrase_list
response = self._es_client.search(index=index, body=query_body)
return [item['_source']['message'] for item in response['hits']['hits']]
es = ElasticSearchClient() | 0.486088 | 0.257281 |
import collections
import redis
import numpy as np
from representations import StateAction
import random
from textworld.core import EnvInfos
GraphInfo = collections.namedtuple('GraphInfo', 'objs, ob_rep, act_rep, graph_state, graph_state_rep, admissible_actions, admissible_actions_rep')
def load_vocab(env):
with open('/content/JW-KG-A2C/kga2c/coin_hard_vocab.txt') as f:
dic = f.read().splitlines()
vocab = {i+2: str(v) for i, v in enumerate(dic)}
vocab[0] = ' '
vocab[1] = '<s>'
vocab_rev = {v: i for i, v in vocab.items()}
return vocab, vocab_rev
def clean_obs(s):
garbage_chars = ['*', '-', '!', '[', ']']
for c in garbage_chars:
s = s.replace(c, ' ')
return s.strip()
import jericho
import textworld
import re
from textworld.generator.game import GameProgression
from textworld.generator.inform7 import Inform7Game
from collections import defaultdict
from jericho.util import clean, extract_objs
from jericho import defines
class TemplateActionGeneratorJeri:
    '''
    Generates actions using the template-action-space.

    :param rom_bindings: Game-specific bindings from :meth:`jericho.FrotzEnv.bindings`.
    :type rom_bindings: Dictionary
    '''

    def __init__(self, rom_bindings):
        self.rom_bindings = rom_bindings
        grammar = rom_bindings['grammar'].split(';')
        max_word_length = rom_bindings['max_word_length']
        self.templates = self._preprocess_templates(grammar, max_word_length)
        # Enchanter and Spellbreaker only recognize abbreviated directions.
        if rom_bindings['name'] in ['enchanter', 'spellbrkr', 'murdac']:
            for act in ['northeast', 'northwest', 'southeast', 'southwest']:
                self.templates.remove(act)
            self.templates.extend(['ne', 'nw', 'se', 'sw'])

    def _preprocess_templates(self, templates, max_word_length):
        '''
        Collapses each "verb1/verb2/..." alternative inside a template down
        to a single verb, and drops empty templates.

        :param templates: Raw grammar entries split from the bindings.
        :param max_word_length: Longest word the game parser recognizes.
        :returns: List of cleaned template strings.
        '''
        out = []
        # NOTE(review): verb_usage_count is neither defined nor imported in
        # this file; any template containing a '/' alternative will raise
        # NameError at runtime.  It presumably should come from
        # jericho.util -- confirm and import it.
        vb_usage_fn = lambda verb: verb_usage_count(verb, max_word_length)
        alternative = re.compile(r'\S+(/\S+)+')  # e.g. "take/get/grab"
        for template in templates:
            if not template:
                continue
            # Repeatedly replace each "a/b/c" group with its best verb.
            # (Fixed: removed the unused local `ts = template.split()`.)
            while True:
                match = alternative.search(template)
                if not match:
                    break
                verb = max(match.group().split('/'), key=vb_usage_fn)
                template = template[:match.start()] + verb + template[match.end():]
            out.append(template)
        return out

    def generate_actions(self, objs):
        '''
        Given a list of objects present at the current location, returns
        a list of possible actions. This list represents all combinations
        of templates filled with the provided objects.

        :param objs: Candidate interactive objects present at the current location.
        :type objs: List of strings
        :returns: List of action-strings.

        :Example:
        >>> import jericho
        >>> env = jericho.FrotzEnv(rom_path)
        >>> interactive_objs = ['phone', 'keys', 'wallet']
        >>> env.act_gen.generate_actions(interactive_objs)
        ['wake', 'wake up', 'wash', ..., 'examine wallet', 'remove phone', 'taste keys']
        '''
        actions = []
        for template in self.templates:
            holes = template.count('OBJ')
            if holes <= 0:
                actions.append(template)
            elif holes == 1:
                actions.extend([template.replace('OBJ', obj) for obj in objs])
            elif holes == 2:
                # Fill two-hole templates with every ordered pair of
                # distinct objects.
                for o1 in objs:
                    for o2 in objs:
                        if o1 != o2:
                            actions.append(template.replace('OBJ', o1, 1).replace('OBJ', o2, 1))
        return actions

    def generate_template_actions(self, objs, obj_ids):
        '''
        Given a list of objects and their corresponding vocab_ids, returns
        a list of possible TemplateActions. This list represents all combinations
        of templates filled with the provided objects.

        :param objs: Candidate interactive objects present at the current location.
        :type objs: List of strings
        :param obj_ids: List of ids corresponding to the tokens of each object.
        :type obj_ids: List of int
        :returns: List of :class:`jericho.defines.TemplateAction`.

        :Example:
        >>> import jericho
        >>> env = jericho.FrotzEnv(rom_path)
        >>> interactive_objs = ['phone', 'keys', 'wallet']
        >>> interactive_obj_ids = [718, 325, 64]
        >>> env.act_gen.generate_template_actions(interactive_objs, interactive_obj_ids)
        [
            TemplateAction(action='wake', template_id=0, obj_ids=[]),
            TemplateAction(action='wake up', template_id=1, obj_ids=[]),
            ...
            TemplateAction(action='turn phone on', template_id=55, obj_ids=[718]),
            TemplateAction(action='put wallet on keys', template_id=65, obj_ids=[64, 325])
        ]
        '''
        assert len(objs) == len(obj_ids)
        actions = []
        for template_idx, template in enumerate(self.templates):
            holes = template.count('OBJ')
            if holes <= 0:
                actions.append(defines.TemplateAction(template, template_idx, []))
            elif holes == 1:
                for noun, noun_id in zip(objs, obj_ids):
                    actions.append(
                        defines.TemplateAction(template.replace('OBJ', noun),
                                               template_idx, [noun_id]))
            elif holes == 2:
                for o1, o1_id in zip(objs, obj_ids):
                    for o2, o2_id in zip(objs, obj_ids):
                        if o1 != o2:
                            actions.append(
                                defines.TemplateAction(
                                    template.replace('OBJ', o1, 1).replace('OBJ', o2, 1),
                                    template_idx, [o1_id, o2_id]))
        return actions
def _load_bindings_from_tw(state, story_file, seed):
bindings = {}
g1 = [re.sub('{.*?}', 'OBJ', s) for s in state.command_templates]
g = list(set([re.sub('go .*', 'go OBJ', s) for s in g1]))
'''g.remove('look')
g.remove('inventory')
g.remove('examine OBJ')
g.remove('drop OBJ')'''
bindings['grammar'] = ';'.join(g)
bindings['max_word_length'] = len(max(state.verbs + state.entities, key=len))
bindings['minimal_actions'] = '/'.join(state['extra.walkthrough'])
bindings['name'] = state['extra.desc']
bindings['rom'] = story_file.split('/')[-1]
bindings['seed'] = seed
bindings['walkthrough'] = bindings['minimal_actions']
return bindings
class JeriWorld:
    """Jericho-compatible facade over either a TextWorld game or a native
    z-machine game.

    ``style='jericho'`` selects the compatibility layer; within it,
    ``self.tw_games`` records whether the underlying engine is TextWorld
    (True) or jericho's FrotzEnv (False).  Most methods are thin dispatchers
    over those two backends.
    """

    def __init__(self, story_file, seed=None, style='jericho', infos = None):
        self.jeri_style = style.lower() == 'jericho'
        if self.jeri_style:
            self._env = textworld.start(story_file, infos=infos)
            state = self._env.reset()
            self.tw_games = True
            self._seed = seed
            self.bindings = None
            # TextWorld games expose command templates; if absent, the story
            # is a plain z-machine file, so switch to jericho's FrotzEnv.
            if state.command_templates is None:
                self.tw_games = False
                del self._env
                self._env = jericho.FrotzEnv(story_file, seed)
                self.bindings = self._env.bindings
                self._world_changed = self._env._world_changed
                self.act_gen = self._env.act_gen
            else:
                self.bindings = _load_bindings_from_tw(state, story_file, seed)
                self._world_changed = self._env._jericho._world_changed
                self.act_gen = TemplateActionGeneratorJeri(self.bindings)
                self.seed(seed)
        else:
            self._env = textworld.start(story_file, infos=infos)

    def __del__(self):
        del self._env

    def reset(self):
        """Reset the game.  The TextWorld branch returns (description,
        info-dict) to mirror jericho's (obs, info) contract."""
        if self.jeri_style:
            if self.tw_games:
                state = self._env.reset()
                raw = state['description']
                return raw, {'moves':state.moves, 'score':state.score}
            return self._env.reset()
        else:
            return self._env.reset()

    def load(self, story_file, seed=None):
        """Load another story file (``seed`` is only honored by the jericho
        backend)."""
        if self.jeri_style:
            if self.tw_games:
                self._env.load(story_file)
            else:
                self._env.load(story_file, seed)
        else:
            self._env.load(story_file)

    def step(self, action):
        """Advance one turn.  The TextWorld branch returns
        (description, intermediate_reward, done, info-dict)."""
        if self.jeri_style:
            if self.tw_games:
                next_state = self._env.step(action)[0]
                self._world_changed = self._env._jericho._world_changed
                return next_state.description, next_state['intermediate_reward'], (next_state.lost or next_state.won),\
                    {'moves':next_state.moves, 'score':next_state.score}
            else:
                self._world_changed = self._env._world_changed
                return self._env.step(action)
        else:
            return self._env.step(action)

    def bindings(self):
        # NOTE(review): dead code -- __init__ assigns the instance attribute
        # `self.bindings`, which shadows this method on every instance, so
        # this can never be called.
        if self.jeri_style:
            return self.bindings
        else:
            return None

    def _emulator_halted(self):
        """Return whether the z-machine emulator halted (None outside
        jericho style)."""
        if self.jeri_style:
            if self.tw_games:
                return self._env._env._emulator_halted()
            return self._env._emulator_halted()
        else:
            return None

    def game_over(self):
        if self.jeri_style:
            if self.tw_games:
                # NOTE(review): this expression's value is discarded (no
                # `return`), so the TextWorld branch falls through to
                # self._env.game_over() below -- confirm that is intended.
                self._env.state['lost']
            return self._env.game_over()
        else:
            return None

    def victory(self):
        if self.jeri_style:
            if self.tw_games:
                # NOTE(review): value discarded; same fall-through pattern
                # as game_over() -- confirm.
                self._env.state['won']
            return self._env.victory()
        else:
            return None

    def seed(self, seed=None):
        """Seed the underlying environment (jericho style only)."""
        if self.jeri_style:
            self._seed = seed
            return self._env.seed(seed)
        else:
            return None

    def close(self):
        if self.jeri_style:
            self._env.close()
        else:
            pass

    def copy(self):
        """Return a copy of the underlying environment."""
        return self._env.copy()

    def get_walkthrough(self):
        if self.jeri_style:
            if self.tw_games:
                return self._env.state['extra.walkthrough']
            return self._env.get_walkthrough()
        else:
            return None

    def get_score(self):
        if self.jeri_style:
            if self.tw_games:
                return self._env.state['score']
            return self._env.get_score()
        else:
            return None

    def get_dictionary(self):
        """Return the words the game parser understands (entities + verbs
        for TextWorld)."""
        if self.jeri_style:
            if self.tw_games:
                state = self._env.state
                return state.entities + state.verbs
            return self._env.get_dictionary()
        else:
            state = self._env.state
            return state.entities + state.verbs

    def get_state(self):
        if self.jeri_style:
            if self.tw_games:
                return self._env._jericho.get_state()
            # NOTE(review): missing call parentheses -- this returns the
            # bound method object rather than the state; probably should be
            # self._env.get_state().
            return self._env.get_state
        else:
            return None

    def set_state(self, state):
        if self.jeri_style:
            if self.tw_games:
                self._env._jericho.set_state(state)
            else:
                # NOTE(review): no-op attribute access; a real jericho env
                # would need self._env.set_state(state) here -- confirm.
                self._env.get_state
        else:
            pass

    def get_valid_actions(self, use_object_tree=True, use_ctypes=True, use_parallel=True):
        """Return admissible commands for the current state (implicit None
        outside jericho style)."""
        if self.jeri_style:
            if self.tw_games:
                return self._env.state['admissible_commands']
            return self._env.get_valid_actions(use_object_tree, use_ctypes, use_parallel)
        else:
            pass

    def _identify_interactive_objects(self, observation='', use_object_tree=False):
        """
        Identifies objects in the current location and inventory that are likely
        to be interactive.
        :param observation: (optional) narrative response to the last action, used to extract candidate objects.
        :type observation: string
        :param use_object_tree: Query the :doc:`object_tree` for names of surrounding objects.
        :type use_object_tree: boolean
        :returns: A list-of-lists containing the name(s) for each interactive object.
        :Example:
        >>> from jericho import *
        >>> env = FrotzEnv('zork1.z5')
        >>> obs, info = env.reset()
        'You are standing in an open field west of a white house with a boarded front door. There is a small mailbox here.'
        >>> env.identify_interactive_objects(obs)
        [['mailbox', 'small'], ['boarded', 'front', 'door'], ['white', 'house']]
        .. note:: Many objects may be referred to in a variety of ways, such as\
        Zork1's brass latern which may be referred to either as *brass* or *lantern*.\
        This method groups all such aliases together into a list for each object.
        """
        if self.jeri_style:
            if self.tw_games:
                objs = set()
                state = self.get_state()
                if observation:
                    # Extract objects from observation
                    obs_objs = extract_objs(observation)
                    obs_objs = [o + ('OBS',) for o in obs_objs]
                    objs = objs.union(obs_objs)
                # Extract objects from location description.  step() mutates
                # the game, so the saved state is restored after each probe.
                self.set_state(state)
                look = clean(self.step('look')[0])
                look_objs = extract_objs(look)
                look_objs = [o + ('LOC',) for o in look_objs]
                objs = objs.union(look_objs)
                # Extract objects from inventory description
                self.set_state(state)
                inv = clean(self.step('inventory')[0])
                inv_objs = extract_objs(inv)
                inv_objs = [o + ('INV',) for o in inv_objs]
                objs = objs.union(inv_objs)
                self.set_state(state)
                # Filter out the objects that aren't in the dictionary
                dict_words = [w for w in self.get_dictionary()]
                max_word_length = max([len(w) for w in dict_words])
                to_remove = set()
                for obj in objs:
                    if len(obj[0].split()) > 1:
                        continue
                    if obj[0][:max_word_length] not in dict_words:
                        to_remove.add(obj)
                objs.difference_update(to_remove)
                # Deduplicate by surface name.  NOTE: despite the docstring,
                # this branch returns a set of names, not a list-of-lists.
                objs_set = set()
                for obj in objs:
                    if obj[0] not in objs_set:
                        objs_set.add(obj[0])
                return objs_set
            return self._env._identify_interactive_objects(observation=observation, use_object_tree=use_object_tree)
        else:
            return None

    def find_valid_actions(self, possible_acts=None):
        """TextWorld branch: group candidate actions by the world-state diff
        they produce.  Jericho branch: filter template actions against the
        true admissible set."""
        if self.jeri_style:
            if self.tw_games:
                diff2acts = {}
                state = self.get_state()
                candidate_actions = self.get_valid_actions()
                for act in candidate_actions:
                    # Restore the state before probing each action.
                    self.set_state(state)
                    self.step(act)
                    diff = self._env._jericho._get_world_diff()
                    if diff in diff2acts:
                        if act not in diff2acts[diff]:
                            diff2acts[diff].append(act)
                    else:
                        diff2acts[diff] = [act]
                self.set_state(state)
                return diff2acts
            else:
                admissible = []
                candidate_acts = self._env._filter_candidate_actions(possible_acts).values()
                true_actions = self._env.get_valid_actions()
                for temp_list in candidate_acts:
                    for template in temp_list:
                        if template.action in true_actions:
                            admissible.append(template)
                return admissible
        else:
            return None

    def _score_object_names(self, interactive_objs):
        """ Attempts to choose a sensible name for an object, typically a noun. """
        if self.jeri_style:
            # Higher score = better candidate name: prefer short nouns, then
            # proper nouns; OBJTREE provenance gets a small boost.
            def score_fn(obj):
                score = -.01 * len(obj[0])
                if obj[1] == 'NOUN':
                    score += 1
                if obj[1] == 'PROPN':
                    score += .5
                if obj[1] == 'ADJ':
                    score += 0
                if obj[2] == 'OBJTREE':
                    score += .1
                return score
            best_names = []
            for desc, objs in interactive_objs.items():
                sorted_objs = sorted(objs, key=score_fn, reverse=True)
                best_names.append(sorted_objs[0][0])
            return best_names
        else:
            return None

    def get_world_state_hash(self):
        if self.jeri_style:
            if self.tw_games:
                # Not available for TextWorld games.
                return None
            else:
                return self._env.get_world_state_hash()
        else:
            return None
class KGA2CEnv:
    '''
    KGA2C environment performs additional graph-based processing on top of a
    JeriWorld game: it tracks a knowledge-graph state representation and the
    set of admissible template actions at every step.
    '''

    def __init__(self, rom_path, seed, spm_model, tsv_file, step_limit=None, stuck_steps=10, gat=True):
        random.seed(seed)
        np.random.seed(seed)
        self.rom_path = rom_path
        self.seed = seed
        self.episode_steps = 0
        self.stuck_steps = 0   # consecutive steps with no world change
        self.valid_steps = 0   # steps that actually changed the world
        self.spm_model = spm_model
        self.tsv_file = tsv_file
        self.step_limit = step_limit
        self.max_stuck_steps = stuck_steps
        self.gat = gat
        # Heavy resources are created lazily in create().
        self.env = None
        self.conn_valid = None
        self.conn_openie = None
        self.vocab = None
        self.vocab_rev = None
        self.state_rep = None

    def create(self):
        ''' Create the Jericho environment and connect to redis. '''
        infos = EnvInfos(admissible_commands=True, description=True, intermediate_reward=True)
        self.env = JeriWorld(self.rom_path, self.seed, infos=infos)
        self.bindings = self.env.bindings
        self.act_gen = self.env.act_gen
        self.max_word_len = self.bindings['max_word_length']
        self.vocab, self.vocab_rev = load_vocab(self.env)
        self.conn_valid = redis.Redis(host='localhost', port=6379, db=0)
        self.conn_openie = redis.Redis(host='localhost', port=6379, db=1)

    def _get_admissible_actions(self, objs):
        ''' Return template actions over `objs` that the game actually accepts. '''
        obj_ids = [self.vocab_rev[o[:self.max_word_len]] for o in objs]
        possible_acts = self.act_gen.generate_template_actions(objs, obj_ids)
        admissible = []
        true_actions = self.env.get_valid_actions()
        for template in possible_acts:
            if template.action in true_actions:
                admissible.append(template)
        return admissible

    def _build_graph_rep(self, action, ob_r):
        ''' Returns various graph-based representations of the current state. '''
        objs = list(self.env._identify_interactive_objects(ob_r))
        admissible_actions = self._get_admissible_actions(objs)
        admissible_actions_rep = [self.state_rep.get_action_rep_drqa(a.action)
                                  for a in admissible_actions] \
            if admissible_actions else [[0] * 20]
        try:  # Gather additional information about the new state
            save_state = self.env.get_state()
            ob_l = self.env.step('look')[0]
            self.env.set_state(save_state)
            ob_i = self.env.step('inventory')[0]
            self.env.set_state(save_state)
        except RuntimeError:
            # Fixed: the original message also referenced `done` and `info`,
            # which do not exist in this scope and raised NameError instead
            # of reporting the RuntimeError.
            print('RuntimeError: {}'.format(clean_obs(ob_r)))
            ob_l = ob_i = ''
        ob_rep = self.state_rep.get_obs_rep(ob_l, ob_i, ob_r, action)
        cleaned_obs = clean_obs(ob_l + ' ' + ob_r)
        openie_cache = self.conn_openie.get(cleaned_obs)
        if openie_cache is None:
            rules, tocache = self.state_rep.step(cleaned_obs, ob_i, objs, action, cache=None, gat=self.gat)
            self.conn_openie.set(cleaned_obs, str(tocache))
        else:
            # NOTE(review): eval() of redis-cached data is only safe while
            # the cache is local and trusted; consider json/ast.literal_eval.
            openie_cache = eval(openie_cache.decode('cp1252'))
            rules, _ = self.state_rep.step(cleaned_obs, ob_i, objs, action, cache=openie_cache, gat=self.gat)
        graph_state = self.state_rep.graph_state
        graph_state_rep = self.state_rep.graph_state_rep
        action_rep = self.state_rep.get_action_rep_drqa(action)
        return GraphInfo(objs, ob_rep, action_rep, graph_state, graph_state_rep,
                         admissible_actions, admissible_actions_rep)

    def step(self, action):
        self.episode_steps += 1
        obs, reward, done, info = self.env.step(action)
        info['valid'] = self.env._world_changed() or done
        info['steps'] = self.episode_steps
        if info['valid']:
            self.valid_steps += 1
            self.stuck_steps = 0
        else:
            self.stuck_steps += 1
        # End the episode when the step budget is exhausted or the agent has
        # been stuck (no world change) for too long.
        if (self.step_limit and self.valid_steps >= self.step_limit) \
                or self.stuck_steps > self.max_stuck_steps:
            done = True
        if done:
            graph_info = GraphInfo(objs=['all'],
                                   ob_rep=self.state_rep.get_obs_rep(obs, obs, obs, action),
                                   act_rep=self.state_rep.get_action_rep_drqa(action),
                                   graph_state=self.state_rep.graph_state,
                                   graph_state_rep=self.state_rep.graph_state_rep,
                                   admissible_actions=[],
                                   admissible_actions_rep=[])
        else:
            graph_info = self._build_graph_rep(action, obs)
        return obs, reward, done, info, graph_info

    def reset(self):
        self.state_rep = StateAction(self.spm_model, self.vocab, self.vocab_rev,
                                     self.tsv_file, self.max_word_len)
        self.stuck_steps = 0
        self.valid_steps = 0
        self.episode_steps = 0
        obs, info = self.env.reset()
        info['valid'] = False
        info['steps'] = 0
        graph_info = self._build_graph_rep('look', obs)
        return obs, info, graph_info

    def close(self):
        ''' Shut down the underlying game environment. '''
        # Fixed: removed non-Python residue ("| kga2c/env.py | ...") that
        # trailed this line and broke parsing of the module.
        self.env.close()
import redis
import numpy as np
from representations import StateAction
import random
from textworld.core import EnvInfos
# Bundle of per-step graph-derived state handed from KGA2CEnv to the agent.
GraphInfo = collections.namedtuple('GraphInfo', 'objs, ob_rep, act_rep, graph_state, graph_state_rep, admissible_actions, admissible_actions_rep')
def load_vocab(env, vocab_path='/content/JW-KG-A2C/kga2c/coin_hard_vocab.txt'):
    """Load the game vocabulary from a newline-separated word file.

    Ids 0 and 1 are reserved (padding and start-of-sequence); real words
    start at id 2.

    :param env: Unused; kept for interface compatibility with callers.
    :param vocab_path: Path to the vocabulary file.  Generalized from the
        previously hard-coded Colab path; the default preserves the old
        behavior.
    :returns: (vocab, vocab_rev) — id->word and word->id mappings.
    """
    with open(vocab_path) as f:
        words = f.read().splitlines()
    vocab = {i + 2: str(w) for i, w in enumerate(words)}
    vocab[0] = ' '
    vocab[1] = '<s>'
    vocab_rev = {w: i for i, w in vocab.items()}
    return vocab, vocab_rev
def clean_obs(s):
    """Replace noisy punctuation ('*', '-', '!', '[', ']') with spaces and
    trim surrounding whitespace from an observation string."""
    return s.translate(str.maketrans('*-![]', '     ')).strip()
import jericho
import textworld
import re
from textworld.generator.game import GameProgression
from textworld.generator.inform7 import Inform7Game
from collections import defaultdict
from jericho.util import clean, extract_objs
from jericho import defines
class TemplateActionGeneratorJeri:
'''
Generates actions using the template-action-space.
:param rom_bindings: Game-specific bindings from :meth:`jericho.FrotzEnv.bindings`.
:type rom_bindings: Dictionary
'''
def __init__(self, rom_bindings):
self.rom_bindings = rom_bindings
grammar = rom_bindings['grammar'].split(';')
max_word_length = rom_bindings['max_word_length']
self.templates = self._preprocess_templates(grammar, max_word_length)
# Enchanter and Spellbreaker only recognize abbreviated directions
if rom_bindings['name'] in ['enchanter', 'spellbrkr', 'murdac']:
for act in ['northeast','northwest','southeast','southwest']:
self.templates.remove(act)
self.templates.extend(['ne','nw','se','sw'])
def _preprocess_templates(self, templates, max_word_length):
'''
Converts templates with multiple verbs and takes the first verb.
'''
out = []
vb_usage_fn = lambda verb: verb_usage_count(verb, max_word_length)
p = re.compile(r'\S+(/\S+)+')
for template in templates:
if not template:
continue
while True:
match = p.search(template)
if not match:
break
verb = max(match.group().split('/'), key=vb_usage_fn)
template = template[:match.start()] + verb + template[match.end():]
ts = template.split()
out.append(template)
return out
def generate_actions(self, objs):
'''
Given a list of objects present at the current location, returns
a list of possible actions. This list represents all combinations
of templates filled with the provided objects.
:param objs: Candidate interactive objects present at the current location.
:type objs: List of strings
:returns: List of action-strings.
:Example:
>>> import jericho
>>> env = jericho.FrotzEnv(rom_path)
>>> interactive_objs = ['phone', 'keys', 'wallet']
>>> env.act_gen.generate_actions(interactive_objs)
['wake', 'wake up', 'wash', ..., 'examine wallet', 'remove phone', 'taste keys']
'''
actions = []
for template in self.templates:
holes = template.count('OBJ')
if holes <= 0:
actions.append(template)
elif holes == 1:
actions.extend([template.replace('OBJ', obj) for obj in objs])
elif holes == 2:
for o1 in objs:
for o2 in objs:
if o1 != o2:
actions.append(template.replace('OBJ', o1, 1).replace('OBJ', o2, 1))
return actions
def generate_template_actions(self, objs, obj_ids):
'''
Given a list of objects and their corresponding vocab_ids, returns
a list of possible TemplateActions. This list represents all combinations
of templates filled with the provided objects.
:param objs: Candidate interactive objects present at the current location.
:type objs: List of strings
:param obj_ids: List of ids corresponding to the tokens of each object.
:type obj_ids: List of int
:returns: List of :class:`jericho.defines.TemplateAction`.
:Example:
>>> import jericho
>>> env = jericho.FrotzEnv(rom_path)
>>> interactive_objs = ['phone', 'keys', 'wallet']
>>> interactive_obj_ids = [718, 325, 64]
>>> env.act_gen.generate_template_actions(interactive_objs, interactive_obj_ids)
[
TemplateAction(action='wake', template_id=0, obj_ids=[]),
TemplateAction(action='wake up', template_id=1, obj_ids=[]),
...
TemplateAction(action='turn phone on', template_id=55, obj_ids=[718]),
TemplateAction(action='put wallet on keys', template_id=65, obj_ids=[64, 325])
]
'''
assert len(objs) == len(obj_ids)
actions = []
for template_idx, template in enumerate(self.templates):
holes = template.count('OBJ')
if holes <= 0:
actions.append(defines.TemplateAction(template, template_idx, []))
elif holes == 1:
for noun, noun_id in zip(objs, obj_ids):
actions.append(
defines.TemplateAction(template.replace('OBJ', noun),
template_idx, [noun_id]))
elif holes == 2:
for o1, o1_id in zip(objs, obj_ids):
for o2, o2_id in zip(objs, obj_ids):
if o1 != o2:
actions.append(
defines.TemplateAction(
template.replace('OBJ', o1, 1).replace('OBJ', o2, 1),
template_idx, [o1_id, o2_id]))
return actions
def _load_bindings_from_tw(state, story_file, seed):
bindings = {}
g1 = [re.sub('{.*?}', 'OBJ', s) for s in state.command_templates]
g = list(set([re.sub('go .*', 'go OBJ', s) for s in g1]))
'''g.remove('look')
g.remove('inventory')
g.remove('examine OBJ')
g.remove('drop OBJ')'''
bindings['grammar'] = ';'.join(g)
bindings['max_word_length'] = len(max(state.verbs + state.entities, key=len))
bindings['minimal_actions'] = '/'.join(state['extra.walkthrough'])
bindings['name'] = state['extra.desc']
bindings['rom'] = story_file.split('/')[-1]
bindings['seed'] = seed
bindings['walkthrough'] = bindings['minimal_actions']
return bindings
class JeriWorld:
def __init__(self, story_file, seed=None, style='jericho', infos = None):
self.jeri_style = style.lower() == 'jericho'
if self.jeri_style:
self._env = textworld.start(story_file, infos=infos)
state = self._env.reset()
self.tw_games = True
self._seed = seed
self.bindings = None
if state.command_templates is None:
self.tw_games = False
del self._env
self._env = jericho.FrotzEnv(story_file, seed)
self.bindings = self._env.bindings
self._world_changed = self._env._world_changed
self.act_gen = self._env.act_gen
else:
self.bindings = _load_bindings_from_tw(state, story_file, seed)
self._world_changed = self._env._jericho._world_changed
self.act_gen = TemplateActionGeneratorJeri(self.bindings)
self.seed(seed)
else:
self._env = textworld.start(story_file, infos=infos)
def __del__(self):
del self._env
def reset(self):
if self.jeri_style:
if self.tw_games:
state = self._env.reset()
raw = state['description']
return raw, {'moves':state.moves, 'score':state.score}
return self._env.reset()
else:
return self._env.reset()
def load(self, story_file, seed=None):
if self.jeri_style:
if self.tw_games:
self._env.load(story_file)
else:
self._env.load(story_file, seed)
else:
self._env.load(story_file)
def step(self, action):
if self.jeri_style:
if self.tw_games:
next_state = self._env.step(action)[0]
self._world_changed = self._env._jericho._world_changed
return next_state.description, next_state['intermediate_reward'], (next_state.lost or next_state.won),\
{'moves':next_state.moves, 'score':next_state.score}
else:
self._world_changed = self._env._world_changed
return self._env.step(action)
else:
return self._env.step(action)
def bindings(self):
if self.jeri_style:
return self.bindings
else:
return None
def _emulator_halted(self):
if self.jeri_style:
if self.tw_games:
return self._env._env._emulator_halted()
return self._env._emulator_halted()
else:
return None
def game_over(self):
if self.jeri_style:
if self.tw_games:
self._env.state['lost']
return self._env.game_over()
else:
return None
def victory(self):
if self.jeri_style:
if self.tw_games:
self._env.state['won']
return self._env.victory()
else:
return None
def seed(self, seed=None):
if self.jeri_style:
self._seed = seed
return self._env.seed(seed)
else:
return None
def close(self):
if self.jeri_style:
self._env.close()
else:
pass
def copy(self):
return self._env.copy()
def get_walkthrough(self):
if self.jeri_style:
if self.tw_games:
return self._env.state['extra.walkthrough']
return self._env.get_walkthrough()
else:
return None
def get_score(self):
if self.jeri_style:
if self.tw_games:
return self._env.state['score']
return self._env.get_score()
else:
return None
def get_dictionary(self):
if self.jeri_style:
if self.tw_games:
state = self._env.state
return state.entities + state.verbs
return self._env.get_dictionary()
else:
state = self._env.state
return state.entities + state.verbs
def get_state(self):
if self.jeri_style:
if self.tw_games:
return self._env._jericho.get_state()
return self._env.get_state
else:
return None
def set_state(self, state):
if self.jeri_style:
if self.tw_games:
self._env._jericho.set_state(state)
else:
self._env.get_state
else:
pass
def get_valid_actions(self, use_object_tree=True, use_ctypes=True, use_parallel=True):
if self.jeri_style:
if self.tw_games:
return self._env.state['admissible_commands']
return self._env.get_valid_actions(use_object_tree, use_ctypes, use_parallel)
else:
pass
def _identify_interactive_objects(self, observation='', use_object_tree=False):
"""
Identifies objects in the current location and inventory that are likely
to be interactive.
:param observation: (optional) narrative response to the last action, used to extract candidate objects.
:type observation: string
:param use_object_tree: Query the :doc:`object_tree` for names of surrounding objects.
:type use_object_tree: boolean
:returns: A list-of-lists containing the name(s) for each interactive object.
:Example:
>>> from jericho import *
>>> env = FrotzEnv('zork1.z5')
>>> obs, info = env.reset()
'You are standing in an open field west of a white house with a boarded front door. There is a small mailbox here.'
>>> env.identify_interactive_objects(obs)
[['mailbox', 'small'], ['boarded', 'front', 'door'], ['white', 'house']]
.. note:: Many objects may be referred to in a variety of ways, such as\
Zork1's brass latern which may be referred to either as *brass* or *lantern*.\
This method groups all such aliases together into a list for each object.
"""
if self.jeri_style:
if self.tw_games:
objs = set()
state = self.get_state()
if observation:
# Extract objects from observation
obs_objs = extract_objs(observation)
obs_objs = [o + ('OBS',) for o in obs_objs]
objs = objs.union(obs_objs)
# Extract objects from location description
self.set_state(state)
look = clean(self.step('look')[0])
look_objs = extract_objs(look)
look_objs = [o + ('LOC',) for o in look_objs]
objs = objs.union(look_objs)
# Extract objects from inventory description
self.set_state(state)
inv = clean(self.step('inventory')[0])
inv_objs = extract_objs(inv)
inv_objs = [o + ('INV',) for o in inv_objs]
objs = objs.union(inv_objs)
self.set_state(state)
# Filter out the objects that aren't in the dictionary
dict_words = [w for w in self.get_dictionary()]
max_word_length = max([len(w) for w in dict_words])
to_remove = set()
for obj in objs:
if len(obj[0].split()) > 1:
continue
if obj[0][:max_word_length] not in dict_words:
to_remove.add(obj)
objs.difference_update(to_remove)
objs_set = set()
for obj in objs:
if obj[0] not in objs_set:
objs_set.add(obj[0])
return objs_set
return self._env._identify_interactive_objects(observation=observation, use_object_tree=use_object_tree)
else:
return None
def find_valid_actions(self, possible_acts=None):
if self.jeri_style:
if self.tw_games:
diff2acts = {}
state = self.get_state()
candidate_actions = self.get_valid_actions()
for act in candidate_actions:
self.set_state(state)
self.step(act)
diff = self._env._jericho._get_world_diff()
if diff in diff2acts:
if act not in diff2acts[diff]:
diff2acts[diff].append(act)
else:
diff2acts[diff] = [act]
self.set_state(state)
return diff2acts
else:
admissible = []
candidate_acts = self._env._filter_candidate_actions(possible_acts).values()
true_actions = self._env.get_valid_actions()
for temp_list in candidate_acts:
for template in temp_list:
if template.action in true_actions:
admissible.append(template)
return admissible
else:
return None
def _score_object_names(self, interactive_objs):
""" Attempts to choose a sensible name for an object, typically a noun. """
if self.jeri_style:
def score_fn(obj):
score = -.01 * len(obj[0])
if obj[1] == 'NOUN':
score += 1
if obj[1] == 'PROPN':
score += .5
if obj[1] == 'ADJ':
score += 0
if obj[2] == 'OBJTREE':
score += .1
return score
best_names = []
for desc, objs in interactive_objs.items():
sorted_objs = sorted(objs, key=score_fn, reverse=True)
best_names.append(sorted_objs[0][0])
return best_names
else:
return None
def get_world_state_hash(self):
if self.jeri_style:
if self.tw_games:
return None
else:
return self._env.get_world_state_hash()
else:
return None
class KGA2CEnv:
'''
KGA2C environment performs additional graph-based processing.
'''
def __init__(self, rom_path, seed, spm_model, tsv_file, step_limit=None, stuck_steps=10, gat=True):
random.seed(seed)
np.random.seed(seed)
self.rom_path = rom_path
self.seed = seed
self.episode_steps = 0
self.stuck_steps = 0
self.valid_steps = 0
self.spm_model = spm_model
self.tsv_file = tsv_file
self.step_limit = step_limit
self.max_stuck_steps = stuck_steps
self.gat = gat
self.env = None
self.conn_valid = None
self.conn_openie = None
self.vocab = None
self.vocab_rev = None
self.state_rep = None
def create(self):
''' Create the Jericho environment and connect to redis. '''
infos = EnvInfos(admissible_commands=True, description=True, intermediate_reward=True)
self.env = JeriWorld(self.rom_path, self.seed, infos=infos)
self.bindings = self.env.bindings
self.act_gen = self.env.act_gen
self.max_word_len = self.bindings['max_word_length']
self.vocab, self.vocab_rev = load_vocab(self.env)
self.conn_valid = redis.Redis(host='localhost', port=6379, db=0)
self.conn_openie = redis.Redis(host='localhost', port=6379, db=1)
def _get_admissible_actions(self, objs):
''' Queries Redis for a list of admissible actions from the current state. '''
obj_ids = [self.vocab_rev[o[:self.max_word_len]] for o in objs]
possible_acts = self.act_gen.generate_template_actions(objs, obj_ids)
admissible = []
true_actions = self.env.get_valid_actions()
for template in possible_acts:
if template.action in true_actions:
admissible.append(template)
return admissible
def _build_graph_rep(self, action, ob_r):
''' Returns various graph-based representations of the current state. '''
#objs = [o[0] for o in self.env._identify_interactive_objects(ob_r)]
objs = list(self.env._identify_interactive_objects(ob_r))
admissible_actions = self._get_admissible_actions(objs)
admissible_actions_rep = [self.state_rep.get_action_rep_drqa(a.action) \
for a in admissible_actions] \
if admissible_actions else [[0] * 20]
try: # Gather additional information about the new state
save_state = self.env.get_state()
ob_l = self.env.step('look')[0]
self.env.set_state(save_state)
ob_i = self.env.step('inventory')[0]
self.env.set_state(save_state)
except RuntimeError:
print('RuntimeError: {}, Done: {}, Info: {}'.format(clean_obs(ob_r), done, info))
ob_l = ob_i = ''
ob_rep = self.state_rep.get_obs_rep(ob_l, ob_i, ob_r, action)
cleaned_obs = clean_obs(ob_l + ' ' + ob_r)
openie_cache = self.conn_openie.get(cleaned_obs)
if openie_cache is None:
rules, tocache = self.state_rep.step(cleaned_obs, ob_i, objs, action, cache=None, gat=self.gat)
self.conn_openie.set(cleaned_obs, str(tocache))
else:
openie_cache = eval(openie_cache.decode('cp1252'))
rules, _ = self.state_rep.step(cleaned_obs, ob_i, objs, action, cache=openie_cache, gat=self.gat)
graph_state = self.state_rep.graph_state
graph_state_rep = self.state_rep.graph_state_rep
action_rep = self.state_rep.get_action_rep_drqa(action)
return GraphInfo(objs, ob_rep, action_rep, graph_state, graph_state_rep,\
admissible_actions, admissible_actions_rep)
def step(self, action):
self.episode_steps += 1
obs, reward, done, info = self.env.step(action)
info['valid'] = self.env._world_changed() or done
info['steps'] = self.episode_steps
if info['valid']:
self.valid_steps += 1
self.stuck_steps = 0
else:
self.stuck_steps += 1
if (self.step_limit and self.valid_steps >= self.step_limit) \
or self.stuck_steps > self.max_stuck_steps:
done = True
if done:
graph_info = GraphInfo(objs=['all'],
ob_rep=self.state_rep.get_obs_rep(obs, obs, obs, action),
act_rep=self.state_rep.get_action_rep_drqa(action),
graph_state=self.state_rep.graph_state,
graph_state_rep=self.state_rep.graph_state_rep,
admissible_actions=[],
admissible_actions_rep=[])
else:
graph_info = self._build_graph_rep(action, obs)
return obs, reward, done, info, graph_info
def reset(self):
self.state_rep = StateAction(self.spm_model, self.vocab, self.vocab_rev,
self.tsv_file, self.max_word_len)
self.stuck_steps = 0
self.valid_steps = 0
self.episode_steps = 0
obs, info = self.env.reset()
info['valid'] = False
info['steps'] = 0
graph_info = self._build_graph_rep('look', obs)
return obs, info, graph_info
def close(self):
self.env.close() | 0.628863 | 0.200969 |
import tmt
class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):
    """
    Use local host for test execution
    In general it is not recommended to run tests on your local machine
    as there might be security risks. Run only those tests which you
    know are safe so that you don't destroy your laptop ;-)
    Example config:
    provision:
    how: local
    Note that 'tmt run' is expected to be executed under a regular user.
    If there are admin rights required (for example in the prepare step)
    you might be asked for a sudo password.
    """
    # Guest instance
    _guest = None

    # Supported methods.  The class docstring above doubles as the
    # user-facing method documentation via doc=__doc__, so keep it tidy.
    _methods = [tmt.steps.Method(name='local', doc=__doc__, order=50)]

    def wake(self, data=None):
        """ Override options and wake up the guest """
        # Only recreate the guest when saved provisioning data is available.
        if data:
            self._guest = GuestLocal(data, name=self.name, parent=self.step)

    def go(self):
        """ Provision the local host """
        super().go()
        # Create a GuestLocal instance pointing at localhost.
        data = {'guest': 'localhost'}
        self._guest = GuestLocal(data, name=self.name, parent=self.step)

    def guest(self):
        """ Return the provisioned guest """
        return self._guest

    def requires(self):
        """ List of required packages needed for workdir sync """
        return GuestLocal.requires()
class GuestLocal(tmt.Guest):
""" Local Host """
def ansible(self, playbook):
""" Prepare localhost using ansible playbook """
playbook = self._ansible_playbook_path(playbook)
stdout, stderr = self.run(
f'sudo sh -c "stty cols {tmt.utils.OUTPUT_WIDTH}; ansible-playbook'
f'{self._ansible_verbosity()} -c local -i localhost, {playbook}"')
self._ansible_summary(stdout)
def execute(self, command, **kwargs):
""" Execute command on localhost """
return self.run(command, **kwargs)
def push(self):
""" Nothing to be done to push workdir """
def pull(self):
""" Nothing to be done to pull workdir """
@classmethod
def requires(cls):
""" No packages needed to sync workdir """
return [] | tmt/steps/provision/local.py | import tmt
class ProvisionLocal(tmt.steps.provision.ProvisionPlugin):
"""
Use local host for test execution
In general it is not recommended to run tests on your local machine
as there might be security risks. Run only those tests which you
know are safe so that you don't destroy your laptop ;-)
Example config:
provision:
how: local
Note that 'tmt run' is expected to be executed under a regular user.
If there are admin rights required (for example in the prepare step)
you might be asked for a sudo password.
"""
# Guest instance
_guest = None
# Supported methods
_methods = [tmt.steps.Method(name='local', doc=__doc__, order=50)]
def wake(self, data=None):
""" Override options and wake up the guest """
if data:
self._guest = GuestLocal(data, name=self.name, parent=self.step)
def go(self):
""" Provision the container """
super().go()
# Create a GuestLocal instance
data = {'guest': 'localhost'}
self._guest = GuestLocal(data, name=self.name, parent=self.step)
def guest(self):
""" Return the provisioned guest """
return self._guest
def requires(self):
""" List of required packages needed for workdir sync """
return GuestLocal.requires()
class GuestLocal(tmt.Guest):
""" Local Host """
def ansible(self, playbook):
""" Prepare localhost using ansible playbook """
playbook = self._ansible_playbook_path(playbook)
stdout, stderr = self.run(
f'sudo sh -c "stty cols {tmt.utils.OUTPUT_WIDTH}; ansible-playbook'
f'{self._ansible_verbosity()} -c local -i localhost, {playbook}"')
self._ansible_summary(stdout)
def execute(self, command, **kwargs):
""" Execute command on localhost """
return self.run(command, **kwargs)
def push(self):
""" Nothing to be done to push workdir """
def pull(self):
""" Nothing to be done to pull workdir """
@classmethod
def requires(cls):
""" No packages needed to sync workdir """
return [] | 0.555194 | 0.516535 |
from django.core.management.base import CommandError
from django.contrib.auth import get_user_model
from courses.api import deactivate_program_enrollment, deactivate_run_enrollment
from courses.management.utils import EnrollmentChangeCommand, enrollment_summaries
from courses.constants import ENROLL_CHANGE_STATUS_TRANSFERRED
from courses.models import CourseRunEnrollment
from users.api import fetch_user
User = get_user_model()
class Command(EnrollmentChangeCommand):
"""Sets a user's enrollment to 'transferred' and creates an enrollment for a different user"""
help = "Sets a user's enrollment to 'transferred' and creates an enrollment for a different user"
def add_arguments(self, parser):
parser.add_argument(
"--from-user",
type=str,
help="The id, email, or username of the enrolled User",
required=True,
)
parser.add_argument(
"--to-user",
type=str,
help="The id, email, or username of the User to whom the enrollment will be transferred",
required=True,
)
parser.add_argument(
"--order", type=str, help="The 'order_id' value for an user's order ID."
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--program",
type=str,
help="The 'readable_id' value for an enrolled Program",
)
group.add_argument(
"--run",
type=str,
help="The 'courseware_id' value for an enrolled CourseRun",
)
parser.add_argument(
"-k",
"--keep-failed-enrollments",
action="store_true",
dest="keep_failed_enrollments",
help="If provided, enrollment records will be kept even if edX enrollment fails",
)
super().add_arguments(parser)
def handle(self, *args, **options):
from_user = fetch_user(options["from_user"])
to_user = fetch_user(options["to_user"])
keep_failed_enrollments = options["keep_failed_enrollments"]
enrollment, enrolled_obj = self.fetch_enrollment(from_user, options)
if options["program"]:
to_user_existing_enrolled_run_ids = CourseRunEnrollment.get_program_run_enrollments(
user=to_user, program=enrolled_obj
).values_list(
"run__courseware_id", flat=True
)
if len(to_user_existing_enrolled_run_ids) > 0:
raise CommandError(
"'to' user is already enrolled in program runs ({})".format(
list(to_user_existing_enrolled_run_ids)
)
)
new_program_enrollment, new_run_enrollments = self.create_program_enrollment(
enrollment,
to_user=to_user,
keep_failed_enrollments=keep_failed_enrollments,
)
if new_program_enrollment and new_run_enrollments:
deactivate_program_enrollment(
enrollment,
change_status=ENROLL_CHANGE_STATUS_TRANSFERRED,
keep_failed_enrollments=keep_failed_enrollments,
)
else:
new_program_enrollment = None
new_run_enrollment = self.create_run_enrollment(
enrollment,
to_user=to_user,
keep_failed_enrollments=keep_failed_enrollments,
)
new_run_enrollments = []
if new_run_enrollment:
new_run_enrollments.append(new_run_enrollment)
deactivate_run_enrollment(
enrollment,
change_status=ENROLL_CHANGE_STATUS_TRANSFERRED,
keep_failed_enrollments=keep_failed_enrollments,
)
if new_program_enrollment or new_run_enrollments:
self.stdout.write(
self.style.SUCCESS(
"Transferred enrollment โ 'from' user: {} ({}), 'to' user: {} ({})\n"
"Enrollments created/updated: {}".format(
from_user.username,
from_user.email,
to_user.username,
to_user.email,
enrollment_summaries(
filter(bool, [new_program_enrollment] + new_run_enrollments)
),
)
)
)
else:
self.stdout.write(
self.style.ERROR(
"Failed to transfer enrollment โ 'from' user: {} ({}), 'to' user: {} ({})\n".format(
from_user.username,
from_user.email,
to_user.username,
to_user.email,
)
)
) | courses/management/commands/transfer_enrollment.py | from django.core.management.base import CommandError
from django.contrib.auth import get_user_model
from courses.api import deactivate_program_enrollment, deactivate_run_enrollment
from courses.management.utils import EnrollmentChangeCommand, enrollment_summaries
from courses.constants import ENROLL_CHANGE_STATUS_TRANSFERRED
from courses.models import CourseRunEnrollment
from users.api import fetch_user
User = get_user_model()
class Command(EnrollmentChangeCommand):
"""Sets a user's enrollment to 'transferred' and creates an enrollment for a different user"""
help = "Sets a user's enrollment to 'transferred' and creates an enrollment for a different user"
def add_arguments(self, parser):
parser.add_argument(
"--from-user",
type=str,
help="The id, email, or username of the enrolled User",
required=True,
)
parser.add_argument(
"--to-user",
type=str,
help="The id, email, or username of the User to whom the enrollment will be transferred",
required=True,
)
parser.add_argument(
"--order", type=str, help="The 'order_id' value for an user's order ID."
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"--program",
type=str,
help="The 'readable_id' value for an enrolled Program",
)
group.add_argument(
"--run",
type=str,
help="The 'courseware_id' value for an enrolled CourseRun",
)
parser.add_argument(
"-k",
"--keep-failed-enrollments",
action="store_true",
dest="keep_failed_enrollments",
help="If provided, enrollment records will be kept even if edX enrollment fails",
)
super().add_arguments(parser)
def handle(self, *args, **options):
from_user = fetch_user(options["from_user"])
to_user = fetch_user(options["to_user"])
keep_failed_enrollments = options["keep_failed_enrollments"]
enrollment, enrolled_obj = self.fetch_enrollment(from_user, options)
if options["program"]:
to_user_existing_enrolled_run_ids = CourseRunEnrollment.get_program_run_enrollments(
user=to_user, program=enrolled_obj
).values_list(
"run__courseware_id", flat=True
)
if len(to_user_existing_enrolled_run_ids) > 0:
raise CommandError(
"'to' user is already enrolled in program runs ({})".format(
list(to_user_existing_enrolled_run_ids)
)
)
new_program_enrollment, new_run_enrollments = self.create_program_enrollment(
enrollment,
to_user=to_user,
keep_failed_enrollments=keep_failed_enrollments,
)
if new_program_enrollment and new_run_enrollments:
deactivate_program_enrollment(
enrollment,
change_status=ENROLL_CHANGE_STATUS_TRANSFERRED,
keep_failed_enrollments=keep_failed_enrollments,
)
else:
new_program_enrollment = None
new_run_enrollment = self.create_run_enrollment(
enrollment,
to_user=to_user,
keep_failed_enrollments=keep_failed_enrollments,
)
new_run_enrollments = []
if new_run_enrollment:
new_run_enrollments.append(new_run_enrollment)
deactivate_run_enrollment(
enrollment,
change_status=ENROLL_CHANGE_STATUS_TRANSFERRED,
keep_failed_enrollments=keep_failed_enrollments,
)
if new_program_enrollment or new_run_enrollments:
self.stdout.write(
self.style.SUCCESS(
"Transferred enrollment โ 'from' user: {} ({}), 'to' user: {} ({})\n"
"Enrollments created/updated: {}".format(
from_user.username,
from_user.email,
to_user.username,
to_user.email,
enrollment_summaries(
filter(bool, [new_program_enrollment] + new_run_enrollments)
),
)
)
)
else:
self.stdout.write(
self.style.ERROR(
"Failed to transfer enrollment โ 'from' user: {} ({}), 'to' user: {} ({})\n".format(
from_user.username,
from_user.email,
to_user.username,
to_user.email,
)
)
) | 0.428592 | 0.179279 |
from tests.fields.subclass_models import RaceParticipant, RacePlacingEnum
from tortoise.contrib import test
class TestCustomFieldFilters(test.IsolatedTestCase):
tortoise_test_modules = ["tests.fields.subclass_models"]
async def asyncSetUp(self):
await super().asyncSetUp()
await RaceParticipant.create(
first_name="George", place=RacePlacingEnum.FIRST, predicted_place=RacePlacingEnum.SECOND
)
await RaceParticipant.create(
first_name="John", place=RacePlacingEnum.SECOND, predicted_place=RacePlacingEnum.THIRD
)
await RaceParticipant.create(first_name="Paul", place=RacePlacingEnum.THIRD)
await RaceParticipant.create(first_name="Ringo", place=RacePlacingEnum.RUNNER_UP)
await RaceParticipant.create(first_name="Stuart", predicted_place=RacePlacingEnum.FIRST)
async def test_equal(self):
self.assertEqual(
set(
await RaceParticipant.filter(place=RacePlacingEnum.FIRST).values_list(
"place", flat=True
)
),
{RacePlacingEnum.FIRST},
)
async def test_not(self):
self.assertEqual(
set(
await RaceParticipant.filter(place__not=RacePlacingEnum.FIRST).values_list(
"place", flat=True
)
),
{
RacePlacingEnum.SECOND,
RacePlacingEnum.THIRD,
RacePlacingEnum.RUNNER_UP,
RacePlacingEnum.DNF,
},
)
async def test_in(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(
place__in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
).values_list("place", flat=True)
),
{RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP},
)
async def test_not_in(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(
place__not_in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
).values_list("place", flat=True)
),
{RacePlacingEnum.FIRST, RacePlacingEnum.SECOND, RacePlacingEnum.THIRD},
)
async def test_isnull(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__isnull=True).values_list(
"first_name", flat=True
)
),
{"Paul", "Ringo"},
)
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__isnull=False).values_list(
"first_name", flat=True
)
),
{"George", "John", "Stuart"},
)
async def test_not_isnull(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__not_isnull=False).values_list(
"first_name", flat=True
)
),
{"Paul", "Ringo"},
)
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__not_isnull=True).values_list(
"first_name", flat=True
)
),
{"George", "John", "Stuart"},
) | tests/fields/test_subclass_filters.py | from tests.fields.subclass_models import RaceParticipant, RacePlacingEnum
from tortoise.contrib import test
class TestCustomFieldFilters(test.IsolatedTestCase):
tortoise_test_modules = ["tests.fields.subclass_models"]
async def asyncSetUp(self):
await super().asyncSetUp()
await RaceParticipant.create(
first_name="George", place=RacePlacingEnum.FIRST, predicted_place=RacePlacingEnum.SECOND
)
await RaceParticipant.create(
first_name="John", place=RacePlacingEnum.SECOND, predicted_place=RacePlacingEnum.THIRD
)
await RaceParticipant.create(first_name="Paul", place=RacePlacingEnum.THIRD)
await RaceParticipant.create(first_name="Ringo", place=RacePlacingEnum.RUNNER_UP)
await RaceParticipant.create(first_name="Stuart", predicted_place=RacePlacingEnum.FIRST)
async def test_equal(self):
self.assertEqual(
set(
await RaceParticipant.filter(place=RacePlacingEnum.FIRST).values_list(
"place", flat=True
)
),
{RacePlacingEnum.FIRST},
)
async def test_not(self):
self.assertEqual(
set(
await RaceParticipant.filter(place__not=RacePlacingEnum.FIRST).values_list(
"place", flat=True
)
),
{
RacePlacingEnum.SECOND,
RacePlacingEnum.THIRD,
RacePlacingEnum.RUNNER_UP,
RacePlacingEnum.DNF,
},
)
async def test_in(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(
place__in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
).values_list("place", flat=True)
),
{RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP},
)
async def test_not_in(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(
place__not_in=[RacePlacingEnum.DNF, RacePlacingEnum.RUNNER_UP]
).values_list("place", flat=True)
),
{RacePlacingEnum.FIRST, RacePlacingEnum.SECOND, RacePlacingEnum.THIRD},
)
async def test_isnull(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__isnull=True).values_list(
"first_name", flat=True
)
),
{"Paul", "Ringo"},
)
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__isnull=False).values_list(
"first_name", flat=True
)
),
{"George", "John", "Stuart"},
)
async def test_not_isnull(self):
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__not_isnull=False).values_list(
"first_name", flat=True
)
),
{"Paul", "Ringo"},
)
self.assertSetEqual(
set(
await RaceParticipant.filter(predicted_place__not_isnull=True).values_list(
"first_name", flat=True
)
),
{"George", "John", "Stuart"},
) | 0.68941 | 0.327225 |
import pytest
import respx
from httpx import Response
from nonebug.app import App
from .utils import get_file, get_json
@pytest.fixture
def mcbbsnews(app: App):
from nonebot_bison.platform import platform_manager
return platform_manager["mcbbsnews"]
@pytest.fixture(scope="module")
def raw_post_list():
return get_json("mcbbsnews/mcbbsnews_raw_post_list.json")
@pytest.fixture(scope="module")
def javanews_post_0():
return get_file("mcbbsnews/post/mcbbsnews_java_post-0.txt")
@pytest.fixture(scope="module")
def javanews_post_1():
return get_file("mcbbsnews/post/mcbbsnews_java_post-1.txt")
@pytest.fixture(scope="module")
def bedrocknews_post():
return get_file("mcbbsnews/post/mcbbsnews_bedrock_post.txt")
@pytest.mark.asyncio
@respx.mock
async def test_javanews_parser(mcbbsnews, raw_post_list, javanews_post_0):
javanews_mock = respx.get("https://www.mcbbs.net/thread-1338607-1-1.html")
javanews_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_javanews.html")
)
)
post = await mcbbsnews.parse(raw_post_list[3])
assert post.text == javanews_post_0
@pytest.mark.asyncio
@respx.mock
async def test_bedrocknews_parser(mcbbsnews, raw_post_list, bedrocknews_post):
bedrocknews_mock = respx.get("https://www.mcbbs.net/thread-1338592-1-1.html")
bedrocknews_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_bedrocknews.html")
)
)
post = await mcbbsnews.parse(raw_post_list[4])
assert post.text == bedrocknews_post
@pytest.mark.asyncio
@respx.mock
async def test_bedrock_express_parser(mcbbsnews, raw_post_list):
bedrock_express_mock = respx.get("https://www.mcbbs.net/thread-1332424-1-1.html")
bedrock_express_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_bedrock_express.html")
)
)
bedrock_express_post = await mcbbsnews.parse(raw_post_list[13])
assert bedrock_express_post.text == get_file(
"mcbbsnews/post/mcbbsnews_bedrock_express_post.txt"
)
@pytest.mark.asyncio
@respx.mock
async def test_java_express_parser(mcbbsnews, raw_post_list):
java_express_mock = respx.get("https://www.mcbbs.net/thread-1340080-1-1.html")
java_express_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_java_express.html")
)
)
java_express_post = await mcbbsnews.parse(raw_post_list[0])
assert java_express_post.text == get_file(
"mcbbsnews/post/mcbbsnews_java_express_post.txt"
)
@pytest.mark.asyncio
@respx.mock
async def test_merch_parser(mcbbsnews, raw_post_list):
mc_merch_mock = respx.get("https://www.mcbbs.net/thread-1342236-1-1.html")
mc_merch_mock.mock(
return_value=Response(200, text=get_file("mcbbsnews/mock/mcbbsnews_merch.html"))
)
mc_merch_post = await mcbbsnews.parse(raw_post_list[26])
assert mc_merch_post.text == get_file("mcbbsnews/post/mcbbsnews_merch_post.txt")
@pytest.mark.asyncio
@respx.mock
async def test_fetch_new(mcbbsnews, dummy_user_subinfo, javanews_post_1):
news_router = respx.get("https://www.mcbbs.net/forum-news-1.html")
news_router.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_post_list_html-0.html")
)
)
new_post = respx.get("https://www.mcbbs.net/thread-1340927-1-1.html")
new_post.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_new_post_html.html")
)
)
target = ""
res = await mcbbsnews.fetch_new_post(target, [dummy_user_subinfo])
assert news_router.called
assert len(res) == 0
news_router.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_post_list_html-1.html")
)
)
res = await mcbbsnews.fetch_new_post(target, [dummy_user_subinfo])
assert news_router.called
post = res[0][1][0]
assert post.target_type == "MCBBSๅนป็ฟผๅ่ฎฏ"
assert post.text == javanews_post_1
assert post.url == "https://www.mcbbs.net/thread-1340927-1-1.html"
assert post.target_name == "Java็ๆฌ่ต่ฎฏ" | tests/platforms/test_mcbbsnews.py | import pytest
import respx
from httpx import Response
from nonebug.app import App
from .utils import get_file, get_json
@pytest.fixture
def mcbbsnews(app: App):
from nonebot_bison.platform import platform_manager
return platform_manager["mcbbsnews"]
@pytest.fixture(scope="module")
def raw_post_list():
return get_json("mcbbsnews/mcbbsnews_raw_post_list.json")
@pytest.fixture(scope="module")
def javanews_post_0():
return get_file("mcbbsnews/post/mcbbsnews_java_post-0.txt")
@pytest.fixture(scope="module")
def javanews_post_1():
return get_file("mcbbsnews/post/mcbbsnews_java_post-1.txt")
@pytest.fixture(scope="module")
def bedrocknews_post():
return get_file("mcbbsnews/post/mcbbsnews_bedrock_post.txt")
@pytest.mark.asyncio
@respx.mock
async def test_javanews_parser(mcbbsnews, raw_post_list, javanews_post_0):
javanews_mock = respx.get("https://www.mcbbs.net/thread-1338607-1-1.html")
javanews_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_javanews.html")
)
)
post = await mcbbsnews.parse(raw_post_list[3])
assert post.text == javanews_post_0
@pytest.mark.asyncio
@respx.mock
async def test_bedrocknews_parser(mcbbsnews, raw_post_list, bedrocknews_post):
bedrocknews_mock = respx.get("https://www.mcbbs.net/thread-1338592-1-1.html")
bedrocknews_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_bedrocknews.html")
)
)
post = await mcbbsnews.parse(raw_post_list[4])
assert post.text == bedrocknews_post
@pytest.mark.asyncio
@respx.mock
async def test_bedrock_express_parser(mcbbsnews, raw_post_list):
bedrock_express_mock = respx.get("https://www.mcbbs.net/thread-1332424-1-1.html")
bedrock_express_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_bedrock_express.html")
)
)
bedrock_express_post = await mcbbsnews.parse(raw_post_list[13])
assert bedrock_express_post.text == get_file(
"mcbbsnews/post/mcbbsnews_bedrock_express_post.txt"
)
@pytest.mark.asyncio
@respx.mock
async def test_java_express_parser(mcbbsnews, raw_post_list):
java_express_mock = respx.get("https://www.mcbbs.net/thread-1340080-1-1.html")
java_express_mock.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_java_express.html")
)
)
java_express_post = await mcbbsnews.parse(raw_post_list[0])
assert java_express_post.text == get_file(
"mcbbsnews/post/mcbbsnews_java_express_post.txt"
)
@pytest.mark.asyncio
@respx.mock
async def test_merch_parser(mcbbsnews, raw_post_list):
mc_merch_mock = respx.get("https://www.mcbbs.net/thread-1342236-1-1.html")
mc_merch_mock.mock(
return_value=Response(200, text=get_file("mcbbsnews/mock/mcbbsnews_merch.html"))
)
mc_merch_post = await mcbbsnews.parse(raw_post_list[26])
assert mc_merch_post.text == get_file("mcbbsnews/post/mcbbsnews_merch_post.txt")
@pytest.mark.asyncio
@respx.mock
async def test_fetch_new(mcbbsnews, dummy_user_subinfo, javanews_post_1):
news_router = respx.get("https://www.mcbbs.net/forum-news-1.html")
news_router.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_post_list_html-0.html")
)
)
new_post = respx.get("https://www.mcbbs.net/thread-1340927-1-1.html")
new_post.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_new_post_html.html")
)
)
target = ""
res = await mcbbsnews.fetch_new_post(target, [dummy_user_subinfo])
assert news_router.called
assert len(res) == 0
news_router.mock(
return_value=Response(
200, text=get_file("mcbbsnews/mock/mcbbsnews_post_list_html-1.html")
)
)
res = await mcbbsnews.fetch_new_post(target, [dummy_user_subinfo])
assert news_router.called
post = res[0][1][0]
assert post.target_type == "MCBBSๅนป็ฟผๅ่ฎฏ"
assert post.text == javanews_post_1
assert post.url == "https://www.mcbbs.net/thread-1340927-1-1.html"
assert post.target_name == "Java็ๆฌ่ต่ฎฏ" | 0.374676 | 0.161122 |
import tensorflow as tf
from CONSTANT import AUDIO_SAMPLE_RATE, IS_CUT_AUDIO, MAX_AUDIO_DURATION
from data_process import extract_mfcc_parallel, get_max_length, ohe2cat, pad_seq
from models.attention import Attention
from models.my_classifier import Classifier
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.layers import (
Activation, Bidirectional, CuDNNLSTM, Dense, Dropout, GlobalMaxPool1D, Input, SpatialDropout1D
)
from tensorflow.python.keras.models import Model as TFModel
from tools import log
class BilstmAttention(Classifier):
def __init__(self):
# clear_session()
log('init BilstmAttention')
self.max_length = None
self._model = None
self.is_init = False
def preprocess_data(self, x):
if IS_CUT_AUDIO:
x = [sample[0:MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE] for sample in x]
# extract mfcc
x = extract_mfcc_parallel(x, n_mfcc=96)
if self.max_length is None:
self.max_length = get_max_length(x)
self.max_length = min(800, self.max_length)
x = pad_seq(x, pad_len=self.max_length)
return x
def init_model(self, input_shape, num_classes, **kwargs):
inputs = Input(shape=input_shape)
# bnorm_1 = BatchNormalization(axis=2)(inputs)
lstm_1 = Bidirectional(
CuDNNLSTM(64, name='blstm_1', return_sequences=True), merge_mode='concat'
)(inputs)
activation_1 = Activation('tanh')(lstm_1)
dropout1 = SpatialDropout1D(0.5)(activation_1)
attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
pool_1 = GlobalMaxPool1D()(attention_1)
dropout2 = Dropout(rate=0.5)(pool_1)
dense_1 = Dense(units=256, activation='relu')(dropout2)
outputs = Dense(units=num_classes, activation='softmax')(dense_1)
model = TFModel(inputs=inputs, outputs=outputs)
optimizer = optimizers.Adam(
# learning_rate=1e-3,
lr=1e-3,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-08,
decay=0.0002,
amsgrad=True
)
model.compile(
optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']
)
model.summary()
self._model = model
self.is_init = True
def fit(self, train_x, train_y, validation_data_fit, round_num, **kwargs):
val_x, val_y = validation_data_fit
if round_num >= 2:
epochs = 10
else:
epochs = 5
patience = 2
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)]
self._model.fit(
train_x,
ohe2cat(train_y),
epochs=epochs,
callbacks=callbacks,
validation_data=(val_x, ohe2cat(val_y)),
verbose=1, # Logs once per epoch.
batch_size=32,
shuffle=True
)
def predict(self, x_test, batch_size=32):
return self._model.predict(x_test, batch_size=batch_size) | examples/automl_freiburg/winner_speech/models/bilstm_attention.py | import tensorflow as tf
from CONSTANT import AUDIO_SAMPLE_RATE, IS_CUT_AUDIO, MAX_AUDIO_DURATION
from data_process import extract_mfcc_parallel, get_max_length, ohe2cat, pad_seq
from models.attention import Attention
from models.my_classifier import Classifier
from tensorflow.python.keras import optimizers
from tensorflow.python.keras.layers import (
Activation, Bidirectional, CuDNNLSTM, Dense, Dropout, GlobalMaxPool1D, Input, SpatialDropout1D
)
from tensorflow.python.keras.models import Model as TFModel
from tools import log
class BilstmAttention(Classifier):
def __init__(self):
# clear_session()
log('init BilstmAttention')
self.max_length = None
self._model = None
self.is_init = False
def preprocess_data(self, x):
if IS_CUT_AUDIO:
x = [sample[0:MAX_AUDIO_DURATION * AUDIO_SAMPLE_RATE] for sample in x]
# extract mfcc
x = extract_mfcc_parallel(x, n_mfcc=96)
if self.max_length is None:
self.max_length = get_max_length(x)
self.max_length = min(800, self.max_length)
x = pad_seq(x, pad_len=self.max_length)
return x
def init_model(self, input_shape, num_classes, **kwargs):
inputs = Input(shape=input_shape)
# bnorm_1 = BatchNormalization(axis=2)(inputs)
lstm_1 = Bidirectional(
CuDNNLSTM(64, name='blstm_1', return_sequences=True), merge_mode='concat'
)(inputs)
activation_1 = Activation('tanh')(lstm_1)
dropout1 = SpatialDropout1D(0.5)(activation_1)
attention_1 = Attention(8, 16)([dropout1, dropout1, dropout1])
pool_1 = GlobalMaxPool1D()(attention_1)
dropout2 = Dropout(rate=0.5)(pool_1)
dense_1 = Dense(units=256, activation='relu')(dropout2)
outputs = Dense(units=num_classes, activation='softmax')(dense_1)
model = TFModel(inputs=inputs, outputs=outputs)
optimizer = optimizers.Adam(
# learning_rate=1e-3,
lr=1e-3,
beta_1=0.9,
beta_2=0.999,
epsilon=1e-08,
decay=0.0002,
amsgrad=True
)
model.compile(
optimizer=optimizer, loss='sparse_categorical_crossentropy', metrics=['accuracy']
)
model.summary()
self._model = model
self.is_init = True
def fit(self, train_x, train_y, validation_data_fit, round_num, **kwargs):
val_x, val_y = validation_data_fit
if round_num >= 2:
epochs = 10
else:
epochs = 5
patience = 2
callbacks = [tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=patience)]
self._model.fit(
train_x,
ohe2cat(train_y),
epochs=epochs,
callbacks=callbacks,
validation_data=(val_x, ohe2cat(val_y)),
verbose=1, # Logs once per epoch.
batch_size=32,
shuffle=True
)
def predict(self, x_test, batch_size=32):
return self._model.predict(x_test, batch_size=batch_size) | 0.864697 | 0.262552 |
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import six
# Bokeh imports
from .code import CodeHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class ScriptHandler(CodeHandler):
''' Modify Bokeh documents by executing code from Python scripts.
'''
_logger_text = "%s: call to %s() ignored when running scripts with the 'bokeh' command."
_origin = "Script"
def __init__(self, *args, **kwargs):
'''
Keywords:
filename (str) : a path to a Python source (".py") file
'''
if 'filename' not in kwargs:
raise ValueError('Must pass a filename to ScriptHandler')
filename = kwargs['filename']
# For Python 3, encoding must be set to utf-8 because:
# - when specifying an encoding in `io.open`, it doesn't
# work with Python 2,
# - default encoding used by Python 3 `open` statement
# is not 'utf-8' on Windows platform,
# - Python 3 `open` ignores le `coding` comment line.
# See https://github.com/bokeh/bokeh/pull/8202 for details.
if six.PY3:
with open(filename, 'r', encoding='utf-8') as f:
kwargs['source'] = f.read()
else:
with open(filename, 'r') as f:
kwargs['source'] = f.read()
super(ScriptHandler, self).__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- | bokeh/application/handlers/script.py | #-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
import six
# Bokeh imports
from .code import CodeHandler
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
class ScriptHandler(CodeHandler):
''' Modify Bokeh documents by executing code from Python scripts.
'''
_logger_text = "%s: call to %s() ignored when running scripts with the 'bokeh' command."
_origin = "Script"
def __init__(self, *args, **kwargs):
'''
Keywords:
filename (str) : a path to a Python source (".py") file
'''
if 'filename' not in kwargs:
raise ValueError('Must pass a filename to ScriptHandler')
filename = kwargs['filename']
# For Python 3, encoding must be set to utf-8 because:
# - when specifying an encoding in `io.open`, it doesn't
# work with Python 2,
# - default encoding used by Python 3 `open` statement
# is not 'utf-8' on Windows platform,
# - Python 3 `open` ignores le `coding` comment line.
# See https://github.com/bokeh/bokeh/pull/8202 for details.
if six.PY3:
with open(filename, 'r', encoding='utf-8') as f:
kwargs['source'] = f.read()
else:
with open(filename, 'r') as f:
kwargs['source'] = f.read()
super(ScriptHandler, self).__init__(*args, **kwargs)
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#----------------------------------------------------------------------------- | 0.514644 | 0.048294 |
from __future__ import absolute_import
from __future__ import unicode_literals
import os
import subprocess
import tempfile
from subprocess import PIPE
import requests
import six
from io import open
def tmpfile(*args, **kwargs):
    """Create a named temporary file.

    Thin wrapper around ``tempfile.mkstemp`` that returns the already-open
    file object (text mode, writable) together with its filesystem path.
    """
    handle, location = tempfile.mkstemp(*args, **kwargs)
    return os.fdopen(handle, 'w'), location
def simple_post(data, url, content_type="text/xml", timeout=60, headers=None, auth=None, verify=None):
    """
    POST with a cleaner API, and return the actual HTTPResponse object, so
    that error codes can be interpreted.
    """
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')  # can't pass unicode to http request posts
    # Content-Length is computed from the (possibly just encoded) byte body.
    base_headers = requests.structures.CaseInsensitiveDict()
    base_headers["content-type"] = content_type
    base_headers["content-length"] = str(len(data))
    if headers:
        base_headers.update(headers)
    options = {
        "headers": base_headers,
        "timeout": timeout,
    }
    if auth:
        options["auth"] = auth
    if verify is not None:
        options["verify"] = verify
    return requests.post(url, data, **options)
def post_data(data, url, curl_command="curl", use_curl=False,
              content_type="text/xml", path=None, use_chunked=False,
              is_odk=False, attachments=None):
    """
    Do a POST of data with some options. Returns a tuple of the response
    from the server and any errors.

    If it's ODK, then also process any additional attachments that are an
    array of tuples of the name and the path.
    """
    attachments = attachments or []
    results = None
    errors = None
    if path is not None:
        with open(path, 'rb') as f:
            data = f.read()
    try:
        if use_curl:
            if path is None:
                # No source file given: spool the in-memory payload to a
                # temp file so curl can stream it from disk.
                tmp_file, path = tmpfile()
                with tmp_file:
                    tmp_file.write(data)
            params = [curl_command, '--request', 'POST']
            params.append('--insecure')
            if not is_odk:
                # it's legacy j2me
                params.append('--header')
                params.append('Content-type:%s' % content_type)
                params.append('--data-binary')
                params.append('@%s' % path)
            else:
                params.append('-F')
                params.append('xml_submission_file=@%s' % path)
                if attachments:
                    for attach in attachments:
                        params.append('-F')
                        params.append('%s=@%s' % (attach[0], attach[1]))
            if use_chunked:
                params.append('--header')
                params.append('Transfer-encoding:chunked')
            else:
                if not is_odk:
                    params.append('--header')
                    params.append('Content-length:%s' % len(data))
            params.append(url)
            p = subprocess.Popen(params,
                                 stdout=PIPE, stderr=PIPE, shell=False)
            # communicate() drains both pipes concurrently; reading stderr
            # then stdout sequentially (as this used to do) can deadlock
            # once either pipe buffer fills up.
            results, errors = p.communicate()
        else:
            # requests' Response has no .read(); .content is the body bytes.
            results = simple_post(data, url, content_type).content
    except Exception as e:
        errors = str(e)
    return results, errors
def post_file(filename, url, curl_command="curl", use_curl=False,
              content_type="text/xml"):
    """
    Do a POST from file with some options. Returns a tuple of the response
    from the server and any errors.
    """
    return post_data(None, url, curl_command=curl_command, use_curl=use_curl,
                     content_type=content_type, path=filename)
from __future__ import unicode_literals
import os
import subprocess
import tempfile
from subprocess import PIPE
import requests
import six
from io import open
def tmpfile(*args, **kwargs):
    """Create a named temporary file.

    Thin wrapper around ``tempfile.mkstemp`` that returns the already-open
    file object (text mode, writable) together with its filesystem path.
    """
    handle, location = tempfile.mkstemp(*args, **kwargs)
    return os.fdopen(handle, 'w'), location
def simple_post(data, url, content_type="text/xml", timeout=60, headers=None, auth=None, verify=None):
    """
    POST with a cleaner API, and return the actual HTTPResponse object, so
    that error codes can be interpreted.
    """
    if isinstance(data, six.text_type):
        data = data.encode('utf-8')  # can't pass unicode to http request posts
    # Content-Length is computed from the (possibly just encoded) byte body.
    base_headers = requests.structures.CaseInsensitiveDict()
    base_headers["content-type"] = content_type
    base_headers["content-length"] = str(len(data))
    if headers:
        base_headers.update(headers)
    options = {
        "headers": base_headers,
        "timeout": timeout,
    }
    if auth:
        options["auth"] = auth
    if verify is not None:
        options["verify"] = verify
    return requests.post(url, data, **options)
def post_data(data, url, curl_command="curl", use_curl=False,
              content_type="text/xml", path=None, use_chunked=False,
              is_odk=False, attachments=None):
    """
    Do a POST of data with some options. Returns a tuple of the response
    from the server and any errors.

    If it's ODK, then also process any additional attachments that are an
    array of tuples of the name and the path.
    """
    attachments = attachments or []
    results = None
    errors = None
    if path is not None:
        with open(path, 'rb') as f:
            data = f.read()
    try:
        if use_curl:
            if path is None:
                # No source file given: spool the in-memory payload to a
                # temp file so curl can stream it from disk.
                tmp_file, path = tmpfile()
                with tmp_file:
                    tmp_file.write(data)
            params = [curl_command, '--request', 'POST']
            params.append('--insecure')
            if not is_odk:
                # it's legacy j2me
                params.append('--header')
                params.append('Content-type:%s' % content_type)
                params.append('--data-binary')
                params.append('@%s' % path)
            else:
                params.append('-F')
                params.append('xml_submission_file=@%s' % path)
                if attachments:
                    for attach in attachments:
                        params.append('-F')
                        params.append('%s=@%s' % (attach[0], attach[1]))
            if use_chunked:
                params.append('--header')
                params.append('Transfer-encoding:chunked')
            else:
                if not is_odk:
                    params.append('--header')
                    params.append('Content-length:%s' % len(data))
            params.append(url)
            p = subprocess.Popen(params,
                                 stdout=PIPE, stderr=PIPE, shell=False)
            # communicate() drains both pipes concurrently; reading stderr
            # then stdout sequentially (as this used to do) can deadlock
            # once either pipe buffer fills up.
            results, errors = p.communicate()
        else:
            # requests' Response has no .read(); .content is the body bytes.
            results = simple_post(data, url, content_type).content
    except Exception as e:
        errors = str(e)
    return results, errors
def post_file(filename, url, curl_command="curl", use_curl=False,
              content_type="text/xml"):
    """
    Do a POST from file with some options. Returns a tuple of the response
    from the server and any errors.
    """
    return post_data(None, url, curl_command=curl_command, use_curl=use_curl,
                     content_type=content_type, path=filename)
import services
import sims4.commands
from server_commands.argument_helpers import SimInfoParam, TunableInstanceParam
RELATIONSHIP_MAX_SCORE = 100  # score written to max out a relationship track
FRIEND_TYPE = 'LTR_Friendship_Main'  # statistic name of the friendship track
ROMANCE_TYPE = 'LTR_Romance_Main'  # statistic name of the romance track
# become_friend <FirstName> <LastName>
# the param HAS TO be : info1: SimInfoParam, info2: SimInfoParam
@sims4.commands.Command('become_friend', command_type=(sims4.commands.CommandType.Live))
def become_friend(info1: SimInfoParam, _connection=None):
    """Cheat command: max out friendship between the active sim and info1."""
    client = services.client_manager().get(_connection)
    output = sims4.commands.CheatOutput(_connection)
    active_sim = client.active_sim
    if info1 is None:
        output("target sim not exists")
        return
    friendship_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(FRIEND_TYPE)
    active_sim.relationship_tracker.set_relationship_score(
        info1.id, RELATIONSHIP_MAX_SCORE, friendship_track)
    output("become friends successfully.")
@sims4.commands.Command('become_lover', command_type=(sims4.commands.CommandType.Live), )
def become_lover(info1: SimInfoParam, _connection=None):
    """Cheat command: max out romance between the active sim and info1."""
    client = services.client_manager().get(_connection)
    output = sims4.commands.CheatOutput(_connection)
    active_sim = client.active_sim
    if info1 is None:
        output("target sim not exists")
        return
    romance_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(ROMANCE_TYPE)
    active_sim.relationship_tracker.set_relationship_score(
        info1.id, RELATIONSHIP_MAX_SCORE, romance_track)
    output("become lovers successfully.")
@sims4.commands.Command('assign_friend', command_type=(sims4.commands.CommandType.Live))
def assign_friend(info1: SimInfoParam, info2: SimInfoParam, _connection=None):
    """Cheat command: max out friendship between two arbitrary sims."""
    output = sims4.commands.CheatOutput(_connection)
    if info1 is None or info2 is None:
        output("at least one of the target sim does not exist")
        return
    friendship_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(FRIEND_TYPE)
    info1.relationship_tracker.set_relationship_score(
        info2.id, RELATIONSHIP_MAX_SCORE, friendship_track)
    output("set friends successfully.")
@sims4.commands.Command('assign_lover', command_type=(sims4.commands.CommandType.Live))
def assign_lover(info1: SimInfoParam, info2: SimInfoParam, _connection=None):
    """Cheat command: max out romance between two arbitrary sims."""
    output = sims4.commands.CheatOutput(_connection)
    if info1 is None or info2 is None:
        output("at least one of the target sim does not exist")
        return
    romance_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(ROMANCE_TYPE)
    info1.relationship_tracker.set_relationship_score(
        info2.id, RELATIONSHIP_MAX_SCORE, romance_track)
    output("set lovers successfully.")
import sims4.commands
from server_commands.argument_helpers import SimInfoParam, TunableInstanceParam
RELATIONSHIP_MAX_SCORE = 100  # score written to max out a relationship track
FRIEND_TYPE = 'LTR_Friendship_Main'  # statistic name of the friendship track
ROMANCE_TYPE = 'LTR_Romance_Main'  # statistic name of the romance track
# become_friend <FirstName> <LastName>
# the param HAS TO be : info1: SimInfoParam, info2: SimInfoParam
@sims4.commands.Command('become_friend', command_type=(sims4.commands.CommandType.Live))
def become_friend(info1: SimInfoParam, _connection=None):
    """Cheat command: max out friendship between the active sim and info1."""
    client = services.client_manager().get(_connection)
    output = sims4.commands.CheatOutput(_connection)
    active_sim = client.active_sim
    if info1 is None:
        output("target sim not exists")
        return
    friendship_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(FRIEND_TYPE)
    active_sim.relationship_tracker.set_relationship_score(
        info1.id, RELATIONSHIP_MAX_SCORE, friendship_track)
    output("become friends successfully.")
@sims4.commands.Command('become_lover', command_type=(sims4.commands.CommandType.Live), )
def become_lover(info1: SimInfoParam, _connection=None):
    """Cheat command: max out romance between the active sim and info1."""
    client = services.client_manager().get(_connection)
    output = sims4.commands.CheatOutput(_connection)
    active_sim = client.active_sim
    if info1 is None:
        output("target sim not exists")
        return
    romance_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(ROMANCE_TYPE)
    active_sim.relationship_tracker.set_relationship_score(
        info1.id, RELATIONSHIP_MAX_SCORE, romance_track)
    output("become lovers successfully.")
@sims4.commands.Command('assign_friend', command_type=(sims4.commands.CommandType.Live))
def assign_friend(info1: SimInfoParam, info2: SimInfoParam, _connection=None):
    """Cheat command: max out friendship between two arbitrary sims."""
    output = sims4.commands.CheatOutput(_connection)
    if info1 is None or info2 is None:
        output("at least one of the target sim does not exist")
        return
    friendship_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(FRIEND_TYPE)
    info1.relationship_tracker.set_relationship_score(
        info2.id, RELATIONSHIP_MAX_SCORE, friendship_track)
    output("set friends successfully.")
@sims4.commands.Command('assign_lover', command_type=(sims4.commands.CommandType.Live))
def assign_lover(info1: SimInfoParam, info2: SimInfoParam, _connection=None):
    """Cheat command: max out romance between two arbitrary sims."""
    output = sims4.commands.CheatOutput(_connection)
    if info1 is None or info2 is None:
        output("at least one of the target sim does not exist")
        return
    romance_track = TunableInstanceParam(sims4.resources.Types.STATISTIC)(ROMANCE_TYPE)
    info1.relationship_tracker.set_relationship_score(
        info2.id, RELATIONSHIP_MAX_SCORE, romance_track)
    output("set lovers successfully.")
from datetime import date, timedelta
from django import forms
from django.utils.translation import ugettext_lazy as _
from frozendict import frozendict
from api.task.serializers import TASK_STATES, TASK_OBJECT_TYPES, TaskLogFilterSerializer
class BaseTaskLogFilterForm(forms.Form):
    """Task log filter form: status, running/auto toggles and a date range.

    Field validation is delegated to TaskLogFilterSerializer during clean();
    call get_filters() afterwards to obtain the log query filters.
    """
    DEFAULT_DATE_FROM = frozendict({'days': 15})  # default look-back window
    _ser = None  # set to the bound TaskLogFilterSerializer in clean()

    status = forms.ChoiceField(
        label=_('Status'),
        required=False,
        choices=TASK_STATES,
        widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent'}),
    )
    show_running = forms.BooleanField(
        label=_('Show only running tasks'),
        required=False,
        widget=forms.CheckboxInput(attrs={'class': 'checkbox fill-up input-navigation'}),
    )
    hide_auto = forms.BooleanField(
        label=_('Hide automatic tasks'),
        required=False,
        widget=forms.CheckboxInput(attrs={'class': 'checkbox fill-up input-navigation'}),
    )
    date_from = forms.DateField(
        label=_('Since'),
        required=True,
        input_formats=('%Y-%m-%d',),
        widget=forms.DateInput(
            format='%Y-%m-%d',
            attrs={'placeholder': _('Since'),
                   'class': 'fill-up input-navigation input-transparent input-date'},
        ),
    )
    date_to = forms.DateField(
        label=_('Until'),
        required=False,
        input_formats=('%Y-%m-%d',),
        widget=forms.DateInput(
            format='%Y-%m-%d',
            attrs={'placeholder': _('Until'),
                   'class': 'fill-up input-navigation input-transparent input-date'},
        ),
    )

    def __init__(self, data, **kwargs):
        # NOTE(review): mutates the incoming `data` mapping in place to inject
        # the default date_from — confirm callers always pass a mutable dict
        # (Django QueryDicts are immutable by default).
        if 'date_from' not in data:
            default_since = date.today() - timedelta(**self.DEFAULT_DATE_FROM)
            data['date_from'] = default_since.strftime('%Y-%m-%d')
        super(BaseTaskLogFilterForm, self).__init__(data, **kwargs)

    def clean(self):
        cleaned_data = super(BaseTaskLogFilterForm, self).clean()
        if not self.errors:
            self._ser = ser = TaskLogFilterSerializer(data=cleaned_data)
            if not ser.is_valid():
                # Surface serializer validation failures as form errors.
                self._errors = ser.errors
        return cleaned_data

    def get_filters(self, pending_tasks=()):
        """Return the task log query filters built by the validated serializer."""
        return self._ser.get_filters(pending_tasks=pending_tasks)
class TaskLogObjectFilterForm(forms.Form):
    """Filter form for narrowing the task log by object type and name."""
    object_type = forms.ChoiceField(
        label=_('Object type'),
        required=False,
        choices=TASK_OBJECT_TYPES,
        widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent'}),
    )
    object_name = forms.CharField(
        label=_('Object name'),
        required=False,
        max_length=2048,
        widget=forms.TextInput(attrs={
            'placeholder': _('Object name'),
            'class': 'fill-up input-navigation input-transparent'}),
    )
class TaskLogFilterForm(TaskLogObjectFilterForm, BaseTaskLogFilterForm):
    """Combined task log filter: object filters plus the base date/status filters."""
from django import forms
from django.utils.translation import ugettext_lazy as _
from frozendict import frozendict
from api.task.serializers import TASK_STATES, TASK_OBJECT_TYPES, TaskLogFilterSerializer
class BaseTaskLogFilterForm(forms.Form):
    """Task log filter form: status, running/auto toggles and a date range.

    Field validation is delegated to TaskLogFilterSerializer during clean();
    call get_filters() afterwards to obtain the log query filters.
    """
    DEFAULT_DATE_FROM = frozendict({'days': 15})  # default look-back window
    _ser = None  # set to the bound TaskLogFilterSerializer in clean()

    status = forms.ChoiceField(
        label=_('Status'),
        required=False,
        choices=TASK_STATES,
        widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent'}),
    )
    show_running = forms.BooleanField(
        label=_('Show only running tasks'),
        required=False,
        widget=forms.CheckboxInput(attrs={'class': 'checkbox fill-up input-navigation'}),
    )
    hide_auto = forms.BooleanField(
        label=_('Hide automatic tasks'),
        required=False,
        widget=forms.CheckboxInput(attrs={'class': 'checkbox fill-up input-navigation'}),
    )
    date_from = forms.DateField(
        label=_('Since'),
        required=True,
        input_formats=('%Y-%m-%d',),
        widget=forms.DateInput(
            format='%Y-%m-%d',
            attrs={'placeholder': _('Since'),
                   'class': 'fill-up input-navigation input-transparent input-date'},
        ),
    )
    date_to = forms.DateField(
        label=_('Until'),
        required=False,
        input_formats=('%Y-%m-%d',),
        widget=forms.DateInput(
            format='%Y-%m-%d',
            attrs={'placeholder': _('Until'),
                   'class': 'fill-up input-navigation input-transparent input-date'},
        ),
    )

    def __init__(self, data, **kwargs):
        # NOTE(review): mutates the incoming `data` mapping in place to inject
        # the default date_from — confirm callers always pass a mutable dict
        # (Django QueryDicts are immutable by default).
        if 'date_from' not in data:
            default_since = date.today() - timedelta(**self.DEFAULT_DATE_FROM)
            data['date_from'] = default_since.strftime('%Y-%m-%d')
        super(BaseTaskLogFilterForm, self).__init__(data, **kwargs)

    def clean(self):
        cleaned_data = super(BaseTaskLogFilterForm, self).clean()
        if not self.errors:
            self._ser = ser = TaskLogFilterSerializer(data=cleaned_data)
            if not ser.is_valid():
                # Surface serializer validation failures as form errors.
                self._errors = ser.errors
        return cleaned_data

    def get_filters(self, pending_tasks=()):
        """Return the task log query filters built by the validated serializer."""
        return self._ser.get_filters(pending_tasks=pending_tasks)
class TaskLogObjectFilterForm(forms.Form):
    """Filter form for narrowing the task log by object type and name."""
    object_type = forms.ChoiceField(
        label=_('Object type'),
        required=False,
        choices=TASK_OBJECT_TYPES,
        widget=forms.Select(attrs={'class': 'fill-up input-navigation select-transparent'}),
    )
    object_name = forms.CharField(
        label=_('Object name'),
        required=False,
        max_length=2048,
        widget=forms.TextInput(attrs={
            'placeholder': _('Object name'),
            'class': 'fill-up input-navigation input-transparent'}),
    )
class TaskLogFilterForm(TaskLogObjectFilterForm, BaseTaskLogFilterForm):
    """Combined task log filter: object filters plus the base date/status filters."""
import asyncio
import io
import logging
import time
from asyncio import Lock, StreamReader, StreamWriter
from typing import Dict, List, Optional, Tuple
from lib.chiavdf.inkfish.classgroup import ClassGroup
from lib.chiavdf.inkfish.create_discriminant import create_discriminant
from lib.chiavdf.inkfish.proof_of_time import check_proof_of_time_nwesolowski
from src.consensus.constants import constants
from src.protocols import timelord_protocol
from src.server.outbound_message import Delivery, Message, NodeType, OutboundMessage
from src.types.classgroup import ClassgroupElement
from src.types.proof_of_time import ProofOfTime
from src.types.sized_bytes import bytes32
from src.util.api_decorators import api_request
from src.util.ints import uint8, uint64
# Module-level logger shared by the timelord implementation below.
log = logging.getLogger(__name__)
class Timelord:
    """Coordinates VDF ("proof of time") client processes.

    Keeps a queue of discriminants (challenges) to work on, hands them out to
    connected VDF clients, collects finished proofs and queues them for
    broadcast to the full node. All shared state is guarded by ``self.lock``.
    """

    def __init__(self, config: Dict):
        self.config: Dict = config
        # VDF client ip -> configured estimate of iterations per second.
        self.ips_estimate = {
            k: v
            for k, v in list(
                zip(
                    self.config["vdf_clients"]["ip"],
                    self.config["vdf_clients"]["ips_estimate"],
                )
            )
        }
        self.lock: Lock = Lock()
        # challenge_hash -> (writer to the VDF client, chain weight, client ip)
        self.active_discriminants: Dict[bytes32, Tuple[StreamWriter, uint64, str]] = {}
        self.best_weight_three_proofs: int = -1
        self.active_discriminants_start_time: Dict = {}
        # challenge_hash -> iteration counts requested / already sent to a client.
        self.pending_iters: Dict = {}
        self.submitted_iters: Dict = {}
        self.done_discriminants: List[bytes32] = []
        self.proofs_to_write: List[OutboundMessage] = []
        self.seen_discriminants: List[bytes32] = []
        self.proof_count: Dict = {}
        # ip -> (average measured ips, number of samples).
        self.avg_ips: Dict = {}
        self.discriminant_queue: List[Tuple[bytes32, uint64]] = []
        self.max_connection_time = self.config["max_connection_time"]
        # (ip, time the client was asked to stop): clients expected to reconnect.
        self.potential_free_clients: List = []
        self.free_clients: List[Tuple[str, StreamReader, StreamWriter]] = []
        self._is_shutdown = False

    async def _handle_client(self, reader: StreamReader, writer: StreamWriter):
        """Register a newly connected VDF client as available for work."""
        async with self.lock:
            client_ip = writer.get_extra_info('peername')[0]
            log.info(f"New timelord connection from client: {client_ip}.")
            # Only accept clients present in the configured estimates.
            if client_ip in self.ips_estimate.keys():
                self.free_clients.append((client_ip, reader, writer))
                log.info(f"Added new VDF client {client_ip}.")
                for ip, end_time in list(self.potential_free_clients):
                    if ip == client_ip:
                        self.potential_free_clients.remove((ip, end_time))
                        break

    async def _shutdown(self):
        """Ask every active VDF client to stop and mark the timelord shut down."""
        async with self.lock:
            for (
                stop_discriminant,
                (stop_writer, _, _),
            ) in self.active_discriminants.items():
                # "010" encodes iteration 0 under the framing used in
                # _send_iterations, which signals the client to stop —
                # confirm against the VDF client protocol.
                stop_writer.write(b"010")
                await stop_writer.drain()
                self.done_discriminants.append(stop_discriminant)
            self.active_discriminants.clear()
            self.active_discriminants_start_time.clear()
            self._is_shutdown = True

    async def _stop_worst_process(self, worst_weight_active):
        """Pick and stop one client working at the lowest active weight.

        This is already inside a lock, no need to lock again.
        """
        log.info(f"Stopping one process at weight {worst_weight_active}")
        stop_writer: Optional[StreamWriter] = None
        stop_discriminant: Optional[bytes32] = None
        low_weights = {
            k: v
            for k, v in self.active_discriminants.items()
            if v[1] == worst_weight_active
        }
        no_iters = {
            k: v
            for k, v in low_weights.items()
            if k not in self.pending_iters or len(self.pending_iters[k]) == 0
        }
        # If we have process(es) with no iters, stop the one that started the latest
        if len(no_iters) > 0:
            latest_start_time = max(
                [self.active_discriminants_start_time[k] for k, _ in no_iters.items()]
            )
            stop_discriminant, stop_writer = next(
                (k, v[0])
                for k, v in no_iters.items()
                if self.active_discriminants_start_time[k] == latest_start_time
            )
        else:
            # Otherwise, pick the one that finishes one proof the latest.
            best_iter = {k: min(self.pending_iters[k]) for k, _ in low_weights.items()}
            time_taken = {
                k: time.time() - self.active_discriminants_start_time[k]
                for k, _ in low_weights.items()
            }
            client_ip = [v[2] for _, v in low_weights.items()]
            # ips maps an IP to the expected iterations per second of it.
            ips = {}
            for ip in client_ip:
                if ip in self.avg_ips:
                    current_ips, _ = self.avg_ips[ip]
                    ips[ip] = current_ips
                else:
                    ips[ip] = self.ips_estimate[ip]
            expected_finish = {
                k: max(0, (best_iter[k] - time_taken[k] * ips[v[2]]) / ips[v[2]])
                for k, v in low_weights.items()
            }
            worst_finish = max([v for v in expected_finish.values()])
            log.info(f"Worst finish time: {worst_finish}s")
            stop_discriminant, stop_writer = next(
                (k, v[0])
                for k, v in low_weights.items()
                if expected_finish[k] == worst_finish
            )
        assert stop_writer is not None
        _, _, stop_ip = self.active_discriminants[stop_discriminant]
        self.potential_free_clients.append((stop_ip, time.time()))
        stop_writer.write(b"010")
        await stop_writer.drain()
        del self.active_discriminants[stop_discriminant]
        del self.active_discriminants_start_time[stop_discriminant]
        self.done_discriminants.append(stop_discriminant)

    async def _update_avg_ips(self, challenge_hash, iterations_needed, ip):
        """Record the measured iterations/second for ip after a finished proof."""
        async with self.lock:
            if challenge_hash in self.active_discriminants:
                time_taken = (
                    time.time() - self.active_discriminants_start_time[challenge_hash]
                )
                ips = int(iterations_needed / time_taken * 10) / 10
                log.info(
                    f"Finished PoT, chall:{challenge_hash[:10].hex()}.."
                    f" {iterations_needed} iters. {int(time_taken*1000)/1000}s, {ips} ips"
                )
                if ip not in self.avg_ips:
                    self.avg_ips[ip] = (ips, 1)
                else:
                    # Running average over all samples seen for this client.
                    prev_avg_ips, trials = self.avg_ips[ip]
                    new_avg_ips = int((prev_avg_ips * trials + ips) / (trials + 1))
                    self.avg_ips[ip] = (new_avg_ips, trials + 1)
                    log.info(f"New estimate: {new_avg_ips}")
                self.pending_iters[challenge_hash].remove(iterations_needed)
            else:
                log.info(
                    f"Finished PoT chall:{challenge_hash[:10].hex()}.. {iterations_needed}"
                    f" iters. But challenge not active anymore"
                )

    async def _update_proofs_count(self, challenge_weight):
        """Count proofs at challenge_weight; after 3, stop clients on lighter work."""
        async with self.lock:
            if challenge_weight not in self.proof_count:
                self.proof_count[challenge_weight] = 1
            else:
                self.proof_count[challenge_weight] += 1
            if self.proof_count[challenge_weight] >= 3:
                log.info("Cleaning up clients.")
                self.best_weight_three_proofs = max(
                    self.best_weight_three_proofs, challenge_weight
                )
                for active_disc in list(self.active_discriminants):
                    current_writer, current_weight, ip = self.active_discriminants[
                        active_disc
                    ]
                    if current_weight <= challenge_weight:
                        log.info(f"Active weight cleanup: {current_weight}")
                        log.info(f"Cleanup weight: {challenge_weight}")
                        self.potential_free_clients.append((ip, time.time()))
                        current_writer.write(b"010")
                        await current_writer.drain()
                        del self.active_discriminants[active_disc]
                        del self.active_discriminants_start_time[active_disc]
                        self.done_discriminants.append(active_disc)

    async def _send_iterations(self, challenge_hash, writer):
        """Feed newly requested iteration counts for challenge_hash to its client."""
        alive_discriminant = True
        while alive_discriminant:
            async with self.lock:
                if (challenge_hash in self.active_discriminants) and (
                    challenge_hash in self.pending_iters
                ):
                    if challenge_hash not in self.submitted_iters:
                        self.submitted_iters[challenge_hash] = []
                    # `iteration` instead of `iter` to avoid shadowing the builtin.
                    for iteration in sorted(self.pending_iters[challenge_hash]):
                        if iteration in self.submitted_iters[challenge_hash]:
                            continue
                        self.submitted_iters[challenge_hash].append(iteration)
                        # Messages are framed with a two-digit decimal length prefix.
                        if len(str(iteration)) < 10:
                            iter_size = "0" + str(len(str(iteration)))
                        else:
                            iter_size = str(len(str(iteration)))
                        writer.write((iter_size + str(iteration)).encode())
                        await writer.drain()
                        log.info(f"New iteration submitted: {iteration}")
            await asyncio.sleep(1)
            async with self.lock:
                if challenge_hash in self.done_discriminants:
                    alive_discriminant = False

    async def _do_process_communication(
        self, challenge_hash, challenge_weight, ip, reader, writer
    ):
        """Drive a single VDF client for one challenge.

        Sends the discriminant, registers the challenge as active, then reads
        proofs from the client until it reports STOP or the connection drops.
        """
        disc: int = create_discriminant(
            challenge_hash, constants["DISCRIMINANT_SIZE_BITS"]
        )
        writer.write((str(len(str(disc))) + str(disc)).encode())
        await writer.drain()
        try:
            ok = await reader.readexactly(2)
        except Exception as e:
            # Covers IncompleteReadError / ConnectionResetError and anything
            # else (the original handler list was subsumed by Exception).
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                if challenge_hash not in self.done_discriminants:
                    self.done_discriminants.append(challenge_hash)
            return
        if ok.decode() != "OK":
            return
        log.info("Got handshake with VDF client.")
        async with self.lock:
            self.active_discriminants[challenge_hash] = (writer, challenge_weight, ip)
            self.active_discriminants_start_time[challenge_hash] = time.time()
        asyncio.create_task(self._send_iterations(challenge_hash, writer))
        # Listen to the client until "STOP" is received.
        while True:
            try:
                data = await reader.readexactly(4)
            except Exception as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    if challenge_hash in self.active_discriminants:
                        del self.active_discriminants[challenge_hash]
                    if challenge_hash in self.active_discriminants_start_time:
                        del self.active_discriminants_start_time[challenge_hash]
                    if challenge_hash not in self.done_discriminants:
                        self.done_discriminants.append(challenge_hash)
                break
            if data.decode() == "STOP":
                log.info(f"Stopped client running on ip {ip}.")
                async with self.lock:
                    writer.write(b"ACK")
                    await writer.drain()
                break
            else:
                try:
                    # This must be a proof, read the continuation.
                    proof = await reader.readexactly(1860)
                    stdout_bytes_io: io.BytesIO = io.BytesIO(
                        bytes.fromhex(data.decode() + proof.decode())
                    )
                except Exception as e:
                    log.warning(f"{type(e)} {e}")
                    async with self.lock:
                        if challenge_hash in self.active_discriminants:
                            del self.active_discriminants[challenge_hash]
                        if challenge_hash in self.active_discriminants_start_time:
                            del self.active_discriminants_start_time[challenge_hash]
                        if challenge_hash not in self.done_discriminants:
                            self.done_discriminants.append(challenge_hash)
                    break
                iterations_needed = uint64(
                    int.from_bytes(stdout_bytes_io.read(8), "big", signed=True)
                )
                y = ClassgroupElement.parse(stdout_bytes_io)
                proof_bytes: bytes = stdout_bytes_io.read()
                # Verifies our own proof just in case
                proof_blob = (
                    ClassGroup.from_ab_discriminant(y.a, y.b, disc).serialize()
                    + proof_bytes
                )
                x = ClassGroup.from_ab_discriminant(2, 1, disc)
                if not check_proof_of_time_nwesolowski(
                    disc,
                    x,
                    proof_blob,
                    iterations_needed,
                    constants["DISCRIMINANT_SIZE_BITS"],
                    self.config["n_wesolowski"],
                ):
                    log.error("My proof is incorrect!")
                output = ClassgroupElement(y.a, y.b)
                proof_of_time = ProofOfTime(
                    challenge_hash,
                    iterations_needed,
                    output,
                    self.config["n_wesolowski"],
                    [uint8(b) for b in proof_bytes],
                )
                response = timelord_protocol.ProofOfTimeFinished(proof_of_time)
                await self._update_avg_ips(challenge_hash, iterations_needed, ip)
                async with self.lock:
                    self.proofs_to_write.append(
                        OutboundMessage(
                            NodeType.FULL_NODE,
                            Message("proof_of_time_finished", response),
                            Delivery.BROADCAST,
                        )
                    )
                await self._update_proofs_count(challenge_weight)

    async def _manage_discriminant_queue(self):
        """Main scheduling loop: assign queued discriminants to free clients.

        Async generator: yields OutboundMessages for finished proofs.
        """
        while not self._is_shutdown:
            async with self.lock:
                if len(self.discriminant_queue) > 0:
                    max_weight = max([h for _, h in self.discriminant_queue])
                    if max_weight <= self.best_weight_three_proofs:
                        # Everything queued is already obsolete.
                        self.done_discriminants.extend(
                            [d for d, _ in self.discriminant_queue]
                        )
                        self.discriminant_queue.clear()
                    else:
                        max_weight_disc = [
                            d for d, h in self.discriminant_queue if h == max_weight
                        ]
                        with_iters = [
                            d
                            for d in max_weight_disc
                            if d in self.pending_iters
                            and len(self.pending_iters[d]) != 0
                        ]
                        if len(with_iters) == 0:
                            disc = max_weight_disc[0]
                        else:
                            # Prefer the discriminant with the smallest pending
                            # iteration count (closest to producing a proof).
                            min_iter = min(
                                [min(self.pending_iters[d]) for d in with_iters]
                            )
                            disc = next(
                                d
                                for d in with_iters
                                if min(self.pending_iters[d]) == min_iter
                            )
                        if len(self.free_clients) != 0:
                            ip, sr, sw = self.free_clients[0]
                            self.free_clients = self.free_clients[1:]
                            self.discriminant_queue.remove((disc, max_weight))
                            asyncio.create_task(
                                self._do_process_communication(
                                    disc, max_weight, ip, sr, sw
                                )
                            )
                        else:
                            # Drop "potential free client" entries that have been
                            # gone longer than max_connection_time.
                            self.potential_free_clients = [
                                (ip, end_time)
                                for ip, end_time
                                in self.potential_free_clients
                                if time.time() < end_time + self.max_connection_time
                            ]
                            if (
                                len(self.potential_free_clients) == 0
                                and len(self.active_discriminants) > 0
                            ):
                                worst_weight_active = min(
                                    [
                                        h
                                        for (
                                            _,
                                            h,
                                            _,
                                        ) in self.active_discriminants.values()
                                    ]
                                )
                                if max_weight > worst_weight_active:
                                    await self._stop_worst_process(worst_weight_active)
                                elif max_weight == worst_weight_active:
                                    if (
                                        disc in self.pending_iters
                                        and len(self.pending_iters[disc]) != 0
                                    ):
                                        if any(
                                            (
                                                k not in self.pending_iters
                                                or len(self.pending_iters[k]) == 0
                                            )
                                            for k, v in self.active_discriminants.items()
                                            if v[1] == worst_weight_active
                                        ):
                                            log.info(
                                                "Stopped process without iters for one with iters."
                                            )
                                            await self._stop_worst_process(
                                                worst_weight_active
                                            )
                if len(self.proofs_to_write) > 0:
                    for msg in self.proofs_to_write:
                        yield msg
                    self.proofs_to_write.clear()
            await asyncio.sleep(0.5)

    @api_request
    async def challenge_start(self, challenge_start: timelord_protocol.ChallengeStart):
        """
        The full node notifies the timelord node that a new challenge is active, and work
        should be started on it. We add the challenge into the queue if it's worth it to have.
        """
        async with self.lock:
            if challenge_start.challenge_hash in self.seen_discriminants:
                log.info(
                    f"Have already seen this challenge hash {challenge_start.challenge_hash}. Ignoring."
                )
                return
            if challenge_start.weight <= self.best_weight_three_proofs:
                log.info("Not starting challenge, already three proofs at that weight")
                return
            self.seen_discriminants.append(challenge_start.challenge_hash)
            self.discriminant_queue.append(
                (challenge_start.challenge_hash, challenge_start.weight)
            )
            log.info("Appended to discriminant queue.")

    @api_request
    async def proof_of_space_info(
        self, proof_of_space_info: timelord_protocol.ProofOfSpaceInfo
    ):
        """
        Notification from full node about a new proof of space for a challenge. If we already
        have a process for this challenge, we should communicate to the process to tell it how
        many iterations to run for.
        """
        async with self.lock:
            log.info(
                f"proof_of_space_info {proof_of_space_info.challenge_hash} {proof_of_space_info.iterations_needed}"
            )
            if proof_of_space_info.challenge_hash in self.done_discriminants:
                log.info(
                    f"proof_of_space_info {proof_of_space_info.challenge_hash} already done, returning"
                )
                return
            if proof_of_space_info.challenge_hash not in self.pending_iters:
                self.pending_iters[proof_of_space_info.challenge_hash] = []
            if proof_of_space_info.challenge_hash not in self.submitted_iters:
                self.submitted_iters[proof_of_space_info.challenge_hash] = []
            if (
                proof_of_space_info.iterations_needed
                not in self.pending_iters[proof_of_space_info.challenge_hash]
                and proof_of_space_info.iterations_needed
                not in self.submitted_iters[proof_of_space_info.challenge_hash]
            ):
                log.info(
                    f"proof_of_space_info {proof_of_space_info.challenge_hash} adding "
                    f"{proof_of_space_info.iterations_needed} to "
                    f"{self.pending_iters[proof_of_space_info.challenge_hash]}"
                )
                self.pending_iters[proof_of_space_info.challenge_hash].append(
                    proof_of_space_info.iterations_needed
                )
import io
import logging
import time
from asyncio import Lock, StreamReader, StreamWriter
from typing import Dict, List, Optional, Tuple
from lib.chiavdf.inkfish.classgroup import ClassGroup
from lib.chiavdf.inkfish.create_discriminant import create_discriminant
from lib.chiavdf.inkfish.proof_of_time import check_proof_of_time_nwesolowski
from src.consensus.constants import constants
from src.protocols import timelord_protocol
from src.server.outbound_message import Delivery, Message, NodeType, OutboundMessage
from src.types.classgroup import ClassgroupElement
from src.types.proof_of_time import ProofOfTime
from src.types.sized_bytes import bytes32
from src.util.api_decorators import api_request
from src.util.ints import uint8, uint64
# Module-level logger shared by the timelord implementation below.
log = logging.getLogger(__name__)
class Timelord:
    def __init__(self, config: Dict):
        # NOTE(review): byte-for-byte duplicate of Timelord.__init__ defined
        # earlier in this file (dataset "parsed_code" copy of the same row).
        self.config: Dict = config
        # VDF client ip -> configured estimate of iterations per second.
        self.ips_estimate = {
            k: v
            for k, v in list(
                zip(
                    self.config["vdf_clients"]["ip"],
                    self.config["vdf_clients"]["ips_estimate"],
                )
            )
        }
        self.lock: Lock = Lock()
        # challenge_hash -> (writer to the VDF client, chain weight, client ip)
        self.active_discriminants: Dict[bytes32, Tuple[StreamWriter, uint64, str]] = {}
        self.best_weight_three_proofs: int = -1
        self.active_discriminants_start_time: Dict = {}
        # challenge_hash -> iteration counts requested / already sent to a client.
        self.pending_iters: Dict = {}
        self.submitted_iters: Dict = {}
        self.done_discriminants: List[bytes32] = []
        self.proofs_to_write: List[OutboundMessage] = []
        self.seen_discriminants: List[bytes32] = []
        self.proof_count: Dict = {}
        # ip -> (average measured ips, number of samples).
        self.avg_ips: Dict = {}
        self.discriminant_queue: List[Tuple[bytes32, uint64]] = []
        self.max_connection_time = self.config["max_connection_time"]
        # (ip, stop time) for clients expected to reconnect shortly.
        self.potential_free_clients: List = []
        self.free_clients: List[Tuple[str, StreamReader, StreamWriter]] = []
        self._is_shutdown = False
    async def _handle_client(self, reader: StreamReader, writer: StreamWriter):
        """Register a newly connected VDF client as available for work."""
        async with self.lock:
            client_ip = writer.get_extra_info('peername')[0]
            log.info(f"New timelord connection from client: {client_ip}.")
            # Only accept clients present in the configured estimates.
            if client_ip in self.ips_estimate.keys():
                self.free_clients.append((client_ip, reader, writer))
                log.info(f"Added new VDF client {client_ip}.")
                for ip, end_time in list(self.potential_free_clients):
                    if ip == client_ip:
                        self.potential_free_clients.remove((ip, end_time))
                        break
    async def _shutdown(self):
        """Stop all active VDF clients and mark this timelord as shut down."""
        async with self.lock:
            for (
                stop_discriminant,
                (stop_writer, _, _),
            ) in self.active_discriminants.items():
                # "010" presumably encodes iteration 0 (two-digit length prefix
                # "01" + payload "0"), which signals the client to stop —
                # confirm against the VDF client protocol.
                stop_writer.write(b"010")
                await stop_writer.drain()
                self.done_discriminants.append(stop_discriminant)
            self.active_discriminants.clear()
            self.active_discriminants_start_time.clear()
            self._is_shutdown = True
async def _stop_worst_process(self, worst_weight_active):
    """Stop one running VDF process at the given (lowest) active weight.

    Prefers a process with no pending iterations (among those, the one that
    started most recently); otherwise stops the process whose next proof is
    expected to finish last.  Caller must already hold ``self.lock``.
    """
    # This is already inside a lock, no need to lock again.
    log.info(f"Stopping one process at weight {worst_weight_active}")
    stop_writer: Optional[StreamWriter] = None
    stop_discriminant: Optional[bytes32] = None
    # Candidates: active processes running at the worst (lowest) weight.
    low_weights = {
        k: v
        for k, v in self.active_discriminants.items()
        if v[1] == worst_weight_active
    }
    # Of those, the ones that have no iteration counts requested yet.
    no_iters = {
        k: v
        for k, v in low_weights.items()
        if k not in self.pending_iters or len(self.pending_iters[k]) == 0
    }
    # If we have process(es) with no iters, stop the one that started the latest
    if len(no_iters) > 0:
        latest_start_time = max(
            [self.active_discriminants_start_time[k] for k, _ in no_iters.items()]
        )
        stop_discriminant, stop_writer = next(
            (k, v[0])
            for k, v in no_iters.items()
            if self.active_discriminants_start_time[k] == latest_start_time
        )
    else:
        # Otherwise, pick the one that finishes one proof the latest.
        # Smallest pending iteration count per process (its next proof).
        best_iter = {k: min(self.pending_iters[k]) for k, _ in low_weights.items()}
        time_taken = {
            k: time.time() - self.active_discriminants_start_time[k]
            for k, _ in low_weights.items()
        }
        client_ip = [v[2] for _, v in low_weights.items()]
        # ips maps an IP to the expected iterations per second of it.
        ips = {}
        for ip in client_ip:
            if ip in self.avg_ips:
                # Prefer the measured running average for this client.
                current_ips, _ = self.avg_ips[ip]
                ips[ip] = current_ips
            else:
                # Fall back to the configured estimate.
                ips[ip] = self.ips_estimate[ip]
        # Seconds remaining until each process reaches its next proof,
        # given elapsed time and the client's iterations/sec rate.
        expected_finish = {
            k: max(0, (best_iter[k] - time_taken[k] * ips[v[2]]) / ips[v[2]])
            for k, v in low_weights.items()
        }
        worst_finish = max([v for v in expected_finish.values()])
        log.info(f"Worst finish time: {worst_finish}s")
        stop_discriminant, stop_writer = next(
            (k, v[0])
            for k, v in low_weights.items()
            if expected_finish[k] == worst_finish
        )
    assert stop_writer is not None
    _, _, stop_ip = self.active_discriminants[stop_discriminant]
    # Remember that this client should reconnect soon, so the scheduler can
    # wait for it instead of stopping yet another process.
    self.potential_free_clients.append((stop_ip, time.time()))
    # b"010" is the stop command understood by the VDF client.
    stop_writer.write(b"010")
    await stop_writer.drain()
    del self.active_discriminants[stop_discriminant]
    del self.active_discriminants_start_time[stop_discriminant]
    self.done_discriminants.append(stop_discriminant)
async def _update_avg_ips(self, challenge_hash, iterations_needed, ip):
    """Record a finished proof of time: update the iterations/sec running
    average for ``ip`` and drop the completed count from pending_iters."""
    async with self.lock:
        if challenge_hash in self.active_discriminants:
            time_taken = (
                time.time() - self.active_discriminants_start_time[challenge_hash]
            )
            # Iterations per second, truncated to one decimal place.
            ips = int(iterations_needed / time_taken * 10) / 10
            log.info(
                f"Finished PoT, chall:{challenge_hash[:10].hex()}.."
                f" {iterations_needed} iters. {int(time_taken*1000)/1000}s, {ips} ips"
            )
            if ip not in self.avg_ips:
                self.avg_ips[ip] = (ips, 1)
            else:
                # Incremental mean over all samples seen for this client.
                prev_avg_ips, trials = self.avg_ips[ip]
                new_avg_ips = int((prev_avg_ips * trials + ips) / (trials + 1))
                self.avg_ips[ip] = (new_avg_ips, trials + 1)
                log.info(f"New estimate: {new_avg_ips}")
            self.pending_iters[challenge_hash].remove(iterations_needed)
        else:
            # Proof arrived after the discriminant was stopped/cleaned up.
            log.info(
                f"Finished PoT chall:{challenge_hash[:10].hex()}.. {iterations_needed}"
                f" iters. But challenge not active anymore"
            )
async def _update_proofs_count(self, challenge_weight):
    """Count one more proof at ``challenge_weight``; once three proofs exist
    there, stop every active process working at that weight or lower."""
    async with self.lock:
        if challenge_weight not in self.proof_count:
            self.proof_count[challenge_weight] = 1
        else:
            self.proof_count[challenge_weight] += 1
        if self.proof_count[challenge_weight] >= 3:
            log.info("Cleaning up clients.")
            self.best_weight_three_proofs = max(
                self.best_weight_three_proofs, challenge_weight
            )
            # Iterate over a copy because entries are deleted in the loop.
            for active_disc in list(self.active_discriminants):
                current_writer, current_weight, ip = self.active_discriminants[
                    active_disc
                ]
                if current_weight <= challenge_weight:
                    log.info(f"Active weight cleanup: {current_weight}")
                    log.info(f"Cleanup weight: {challenge_weight}")
                    # This client will be free soon; note it for the scheduler.
                    self.potential_free_clients.append((ip, time.time()))
                    # b"010" is the stop command understood by the VDF client.
                    current_writer.write(b"010")
                    await current_writer.drain()
                    del self.active_discriminants[active_disc]
                    del self.active_discriminants_start_time[active_disc]
                    self.done_discriminants.append(active_disc)
async def _send_iterations(self, challenge_hash, writer):
    """Periodically push newly requested iteration counts for
    ``challenge_hash`` to the VDF client behind ``writer``.

    Each message is the decimal iteration count prefixed with its
    2-digit, zero-padded length.  Loops once per second until the
    discriminant shows up in ``done_discriminants``.
    """
    alive_discriminant = True
    while alive_discriminant:
        async with self.lock:
            if (challenge_hash in self.active_discriminants) and (
                challenge_hash in self.pending_iters
            ):
                if challenge_hash not in self.submitted_iters:
                    self.submitted_iters[challenge_hash] = []
                # Send pending counts in increasing order, skipping any
                # already submitted to this client.
                # (Renamed from `iter`, which shadowed the builtin.)
                for iteration in sorted(self.pending_iters[challenge_hash]):
                    if iteration in self.submitted_iters[challenge_hash]:
                        continue
                    self.submitted_iters[challenge_hash].append(iteration)
                    payload = str(iteration)
                    # Two digits always suffice for the length prefix:
                    # a uint64 has at most 20 decimal digits.
                    writer.write((str(len(payload)).zfill(2) + payload).encode())
                    await writer.drain()
                    log.info(f"New iteration submitted: {iteration}")
        await asyncio.sleep(1)
        async with self.lock:
            if challenge_hash in self.done_discriminants:
                alive_discriminant = False
async def _do_process_communication(
    self, challenge_hash, challenge_weight, ip, reader, writer
):
    """Drive one VDF client: send the discriminant, then receive proofs
    until the client says "STOP" or the connection drops.

    Each received proof is verified locally, wrapped in a
    ProofOfTimeFinished message and queued on ``proofs_to_write`` for
    broadcast by the scheduler loop.
    """
    disc: int = create_discriminant(
        challenge_hash, constants["DISCRIMINANT_SIZE_BITS"]
    )
    # Handshake: the discriminant's decimal digits, prefixed by its length.
    writer.write((str(len(str(disc))) + str(disc)).encode())
    await writer.drain()
    try:
        ok = await reader.readexactly(2)
    # NOTE(review): including Exception makes the other entries in this
    # tuple redundant -- everything is caught here.
    except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
        log.warning(f"{type(e)} {e}")
        async with self.lock:
            if challenge_hash not in self.done_discriminants:
                self.done_discriminants.append(challenge_hash)
        return
    if ok.decode() != "OK":
        return
    log.info("Got handshake with VDF client.")
    async with self.lock:
        self.active_discriminants[challenge_hash] = (writer, challenge_weight, ip)
        self.active_discriminants_start_time[challenge_hash] = time.time()
    # Background task feeds iteration counts to the client as they arrive.
    asyncio.create_task(self._send_iterations(challenge_hash, writer))
    # Listen to the client until "STOP" is received.
    while True:
        try:
            # Either the 4-byte "STOP" marker or the first 4 hex chars
            # of a proof payload.
            data = await reader.readexactly(4)
        except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
            log.warning(f"{type(e)} {e}")
            async with self.lock:
                if challenge_hash in self.active_discriminants:
                    del self.active_discriminants[challenge_hash]
                if challenge_hash in self.active_discriminants_start_time:
                    del self.active_discriminants_start_time[challenge_hash]
                if challenge_hash not in self.done_discriminants:
                    self.done_discriminants.append(challenge_hash)
            break
        if data.decode() == "STOP":
            log.info(f"Stopped client running on ip {ip}.")
            async with self.lock:
                writer.write(b"ACK")
                await writer.drain()
            break
        else:
            try:
                # This must be a proof, read the continuation.
                # NOTE(review): 1860 appears to be the fixed hex length of
                # the remainder of a proof payload -- confirm against the
                # VDF client protocol.
                proof = await reader.readexactly(1860)
                stdout_bytes_io: io.BytesIO = io.BytesIO(
                    bytes.fromhex(data.decode() + proof.decode())
                )
            except (asyncio.IncompleteReadError, ConnectionResetError, Exception) as e:
                log.warning(f"{type(e)} {e}")
                async with self.lock:
                    if challenge_hash in self.active_discriminants:
                        del self.active_discriminants[challenge_hash]
                    if challenge_hash in self.active_discriminants_start_time:
                        del self.active_discriminants_start_time[challenge_hash]
                    if challenge_hash not in self.done_discriminants:
                        self.done_discriminants.append(challenge_hash)
                break
            # Payload layout: 8-byte big-endian iteration count, then the
            # classgroup element y, then the raw proof bytes.
            iterations_needed = uint64(
                int.from_bytes(stdout_bytes_io.read(8), "big", signed=True)
            )
            y = ClassgroupElement.parse(stdout_bytes_io)
            proof_bytes: bytes = stdout_bytes_io.read()
            # Verifies our own proof just in case
            proof_blob = (
                ClassGroup.from_ab_discriminant(y.a, y.b, disc).serialize()
                + proof_bytes
            )
            # (2, 1) is the classgroup generator used as the VDF input.
            x = ClassGroup.from_ab_discriminant(2, 1, disc)
            if not check_proof_of_time_nwesolowski(
                disc,
                x,
                proof_blob,
                iterations_needed,
                constants["DISCRIMINANT_SIZE_BITS"],
                self.config["n_wesolowski"],
            ):
                # Verification failure is logged but the proof is still sent.
                log.error("My proof is incorrect!")
            output = ClassgroupElement(y.a, y.b)
            proof_of_time = ProofOfTime(
                challenge_hash,
                iterations_needed,
                output,
                self.config["n_wesolowski"],
                [uint8(b) for b in proof_bytes],
            )
            response = timelord_protocol.ProofOfTimeFinished(proof_of_time)
            await self._update_avg_ips(challenge_hash, iterations_needed, ip)
            async with self.lock:
                self.proofs_to_write.append(
                    OutboundMessage(
                        NodeType.FULL_NODE,
                        Message("proof_of_time_finished", response),
                        Delivery.BROADCAST,
                    )
                )
            await self._update_proofs_count(challenge_weight)
async def _manage_discriminant_queue(self):
    """Main scheduling loop (an async generator).

    Every 0.5s: assigns the best queued discriminant to a free VDF client,
    frees capacity by stopping a lower-priority process when appropriate,
    and yields any queued proof messages for broadcast.
    """
    while not self._is_shutdown:
        async with self.lock:
            if len(self.discriminant_queue) > 0:
                max_weight = max([h for _, h in self.discriminant_queue])
                if max_weight <= self.best_weight_three_proofs:
                    # Everything queued is obsolete (three proofs already
                    # exist at an equal or better weight): drop it all.
                    self.done_discriminants.extend(
                        [d for d, _ in self.discriminant_queue]
                    )
                    self.discriminant_queue.clear()
                else:
                    max_weight_disc = [
                        d for d, h in self.discriminant_queue if h == max_weight
                    ]
                    # Prefer a discriminant that already has iterations
                    # requested; among those, the smallest next iteration.
                    with_iters = [
                        d
                        for d in max_weight_disc
                        if d in self.pending_iters
                        and len(self.pending_iters[d]) != 0
                    ]
                    if len(with_iters) == 0:
                        disc = max_weight_disc[0]
                    else:
                        min_iter = min(
                            [min(self.pending_iters[d]) for d in with_iters]
                        )
                        disc = next(
                            d
                            for d in with_iters
                            if min(self.pending_iters[d]) == min_iter
                        )
                    if len(self.free_clients) != 0:
                        # A client is free: hand it the chosen discriminant.
                        ip, sr, sw = self.free_clients[0]
                        self.free_clients = self.free_clients[1:]
                        self.discriminant_queue.remove((disc, max_weight))
                        asyncio.create_task(
                            self._do_process_communication(
                                disc, max_weight, ip, sr, sw
                            )
                        )
                    else:
                        # No free client: first expire stale "about to be
                        # free" entries, then consider preempting a worse
                        # process.
                        self.potential_free_clients = [
                            (ip, end_time)
                            for ip, end_time
                            in self.potential_free_clients
                            if time.time() < end_time + self.max_connection_time
                        ]
                        if (
                            len(self.potential_free_clients) == 0
                            and len(self.active_discriminants) > 0
                        ):
                            worst_weight_active = min(
                                [
                                    h
                                    for (
                                        _,
                                        h,
                                        _,
                                    ) in self.active_discriminants.values()
                                ]
                            )
                            if max_weight > worst_weight_active:
                                # Queued work outranks a running process.
                                await self._stop_worst_process(worst_weight_active)
                            elif max_weight == worst_weight_active:
                                # Equal weight: only preempt a process that
                                # has no iterations in favor of one that does.
                                if (
                                    disc in self.pending_iters
                                    and len(self.pending_iters[disc]) != 0
                                ):
                                    if any(
                                        (
                                            k not in self.pending_iters
                                            or len(self.pending_iters[k]) == 0
                                        )
                                        for k, v in self.active_discriminants.items()
                                        if v[1] == worst_weight_active
                                    ):
                                        log.info(
                                            "Stopped process without iters for one with iters."
                                        )
                                        await self._stop_worst_process(
                                            worst_weight_active
                                        )
            # Flush completed proofs to the caller for broadcast.
            if len(self.proofs_to_write) > 0:
                for msg in self.proofs_to_write:
                    yield msg
                self.proofs_to_write.clear()
        await asyncio.sleep(0.5)
@api_request
async def challenge_start(self, challenge_start: timelord_protocol.ChallengeStart):
    """
    Handle a full node's notification that a new challenge is active.

    Challenges that were already seen, or whose weight is no better than a
    weight we already hold three proofs for, are ignored; anything else is
    queued for a VDF process.
    """
    new_challenge = challenge_start.challenge_hash
    new_weight = challenge_start.weight
    async with self.lock:
        if new_challenge in self.seen_discriminants:
            log.info(
                f"Have already seen this challenge hash {new_challenge}. Ignoring."
            )
            return
        if new_weight <= self.best_weight_three_proofs:
            log.info("Not starting challenge, already three proofs at that weight")
            return
        self.seen_discriminants.append(new_challenge)
        self.discriminant_queue.append((new_challenge, new_weight))
        log.info("Appended to discriminant queue.")
@api_request
async def proof_of_space_info(
    self, proof_of_space_info: timelord_protocol.ProofOfSpaceInfo
):
    """
    Handle a full node's notification about a new proof of space.

    Records the requested iteration count for the challenge so a running
    (or future) VDF process can be told how many iterations to run for.
    """
    challenge = proof_of_space_info.challenge_hash
    iters = proof_of_space_info.iterations_needed
    async with self.lock:
        log.info(f"proof_of_space_info {challenge} {iters}")
        if challenge in self.done_discriminants:
            log.info(f"proof_of_space_info {challenge} already done, returning")
            return
        self.pending_iters.setdefault(challenge, [])
        self.submitted_iters.setdefault(challenge, [])
        # Only record counts not already pending or sent to a client.
        if (
            iters not in self.pending_iters[challenge]
            and iters not in self.submitted_iters[challenge]
        ):
            log.info(
                f"proof_of_space_info {challenge} adding "
                f"{iters} to "
                f"{self.pending_iters[challenge]}"
            )
            self.pending_iters[challenge].append(iters)
from __future__ import print_function, division, absolute_import
from astropy.time import Time
from .. import fetch
import numpy as np
from .. import cache
# Public API of this module.
__all__ = ['MNF_TIME', 'times_indexes', 'DerivedParameter']
MNF_TIME = 0.25625 # Minor Frame duration (seconds)
def times_indexes(start, stop, dt):
    """Return (times, indexes) for a uniform grid of step ``dt``.

    ``indexes`` are absolute bin numbers (unix time // dt) spanning
    start..stop inclusive; ``times`` are the matching unix timestamps.
    """
    first = Time(start, format='unix').unix // dt
    last = Time(stop, format='unix').unix // dt + 1
    indexes = np.arange(first, last, dtype=np.int64)
    return indexes * dt, indexes
@cache.lru_cache(20)
def interpolate_times(keyvals, len_data_times, data_times=None, times=None):
    """Return nearest-neighbor indexes into ``data_times`` for each ``times``.

    ``keyvals`` is a hashable summary of the inputs used only as the cache
    key (the arrays themselves are passed as keyword arguments).

    The previous implementation called ``np.interpolate(..., method='nearest')``,
    which does not exist in numpy and raised AttributeError; this computes
    the equivalent nearest-neighbor index lookup with ``np.searchsorted``.
    ``data_times`` is assumed sorted ascending (it is a telemetry time
    column) -- confirm at call sites.
    """
    data_times = np.asarray(data_times)
    times = np.asarray(times)
    if len_data_times < 2:
        # Degenerate input: everything maps to the single (or no) sample.
        return np.zeros(len(times), dtype=np.int64)
    # Index of the first data time >= each requested time, clipped so both
    # neighbors exist ...
    right = np.clip(np.searchsorted(data_times, times), 1, len_data_times - 1)
    left = right - 1
    # ... then pick whichever neighbor is closer in time.
    use_left = (times - data_times[left]) < (data_times[right] - times)
    return np.where(use_left, left, right)
class DerivedParameter(object):
    """Base class for derived (computed) telemetry parameters.

    Subclasses provide ``rootparams``, ``time_step`` and ``content_root``
    attributes and implement :meth:`calc`.
    """
    max_gap = 66.0 # Max allowed data gap (seconds)
    max_gaps = {}  # Per-MSID overrides of max_gap, keyed by MSID name
    unit_system = 'eng'  # Unit system used when fetching root parameters
    dtype = None # If not None then cast to this dtype
def calc(self, data):
    """Compute the derived values from ``data`` (an interpolated MSIDset).

    Subclasses must override this.
    """
    raise NotImplementedError
def fetch(self, start, stop):
    """Fetch root MSIDs and nearest-neighbor sample them onto a uniform grid.

    Returns the MSIDset with its MSIDs resampled at ``self.time_step``
    intervals and with ``times``, ``bads`` and ``indexes`` attributes
    describing the common grid.

    Fix: ``np.bool`` (deprecated since NumPy 1.20, removed in 1.24) is
    replaced by the builtin ``bool`` dtype.
    """
    unit_system = fetch.get_units()  # cache current units and restore after fetch
    fetch.set_units(self.unit_system)
    dataset = fetch.MSIDset(self.rootparams, start, stop)
    fetch.set_units(unit_system)
    # Translate state codes "ON" and "OFF" to 1 and 0, respectively.
    for data in dataset.values():
        if (data.vals.dtype.name == 'string24'
                and set(data.vals).issubset(set(['ON ', 'OFF']))):
            data.vals = np.where(data.vals == 'OFF', np.int8(0), np.int8(1))
    times, indexes = times_indexes(start, stop, self.time_step)
    bads = np.zeros(len(times), dtype=bool)  # All data OK (false)
    for msidname, data in dataset.items():
        # If no data are found in specified interval then stub two fake
        # data points that are both bad. All interpolated points will likewise
        # be bad.
        if len(data) < 2:
            data.vals = np.zeros(2, dtype=data.vals.dtype)  # two null points
            data.bads = np.ones(2, dtype=bool)  # all points bad
            data.times = np.array([times[0], times[-1]])
            print('No data in {} between {} and {} (setting all bad)'
                  .format(msidname, Time(start, format='unix').yday, Time(stop, format='unix').yday))
        keyvals = (data.content, data.times[0], data.times[-1],
                   len(times), times[0], times[-1])
        idxs = interpolate_times(keyvals, len(data.times),
                                 data_times=data.times, times=times)
        # Loop over data attributes like "bads", "times", "vals" etc and
        # perform near-neighbor interpolation by indexing
        for attr in data.colnames:
            vals = getattr(data, attr)
            if vals is not None:
                setattr(data, attr, vals[idxs])
        bads = bads | data.bads
        # Reject near-neighbor points more than max_gap secs from available data
        max_gap = self.max_gaps.get(msidname, self.max_gap)
        gap_bads = abs(data.times - times) > max_gap
        if np.any(gap_bads):
            print("Setting bads because of gaps in {} between {} to {}"
                  .format(msidname,
                          Time(times[gap_bads][0], format='unix').yday,
                          Time(times[gap_bads][-1], format='unix').yday))
        bads = bads | gap_bads
    dataset.times = times
    dataset.bads = bads
    dataset.indexes = indexes
    return dataset
def __call__(self, start, stop):
    """Return the derived parameter values over start..stop as an ndarray."""
    dataset = fetch.MSIDset(self.rootparams, start, stop, filter_bad=True)
    # Translate state codes "ON" and "OFF" to 1 and 0, respectively.
    for data in dataset.values():
        # NOTE(review): fetch() uses issubset() for this check while this
        # method requires exact set equality -- confirm the difference is
        # intentional.
        if (data.vals.dtype.name == 'string24'
                and set(data.vals) == set(('ON ', 'OFF'))):
            data.vals = np.where(data.vals == 'OFF', np.int8(0), np.int8(1))
    dataset.interpolate(dt=self.time_step)
    # Return calculated values. Np.asarray will copy the array only if
    # dtype is not None and different from vals.dtype; otherwise a
    # reference is returned.
    vals = self.calc(dataset)
    return np.asarray(vals, dtype=self.dtype)
@property
def mnf_step(self):
    """Time step expressed as a whole number of minor frames."""
    n_frames = self.time_step / MNF_TIME
    return int(round(n_frames))
@property
def content(self):
return 'dp_{}{}'.format(self.content_root.lower(), self.mnf_step) | jeta/archive/derived/base.py | from __future__ import print_function, division, absolute_import
from astropy.time import Time
from .. import fetch
import numpy as np
from .. import cache
__all__ = ['MNF_TIME', 'times_indexes', 'DerivedParameter']
MNF_TIME = 0.25625 # Minor Frame duration (seconds)
def times_indexes(start, stop, dt):
index0 = Time(start, format='unix').unix // dt
index1 = Time(stop, format='unix').unix // dt + 1
indexes = np.arange(index0, index1, dtype=np.int64)
times = indexes * dt
return times, indexes
@cache.lru_cache(20)
def interpolate_times(keyvals, len_data_times, data_times=None, times=None):
return np.interpolate(np.arange(len_data_times),
data_times, times, method='nearest')
class DerivedParameter(object):
max_gap = 66.0 # Max allowed data gap (seconds)
max_gaps = {}
unit_system = 'eng'
dtype = None # If not None then cast to this dtype
def calc(self, data):
raise NotImplementedError
def fetch(self, start, stop):
unit_system = fetch.get_units() # cache current units and restore after fetch
fetch.set_units(self.unit_system)
dataset = fetch.MSIDset(self.rootparams, start, stop)
fetch.set_units(unit_system)
# Translate state codes "ON" and "OFF" to 1 and 0, respectively.
for data in dataset.values():
if (data.vals.dtype.name == 'string24'
and set(data.vals).issubset(set(['ON ', 'OFF']))):
data.vals = np.where(data.vals == 'OFF', np.int8(0), np.int8(1))
times, indexes = times_indexes(start, stop, self.time_step)
bads = np.zeros(len(times), dtype=np.bool) # All data OK (false)
for msidname, data in dataset.items():
# If no data are found in specified interval then stub two fake
# data points that are both bad. All interpolated points will likewise
# be bad.
if len(data) < 2:
data.vals = np.zeros(2, dtype=data.vals.dtype) # two null points
data.bads = np.ones(2, dtype=np.bool) # all points bad
data.times = np.array([times[0], times[-1]])
print('No data in {} between {} and {} (setting all bad)'
.format(msidname, Time(start, format='unix').yday, Time(stop, format='unix').yday))
keyvals = (data.content, data.times[0], data.times[-1],
len(times), times[0], times[-1])
idxs = interpolate_times(keyvals, len(data.times),
data_times=data.times, times=times)
# Loop over data attributes like "bads", "times", "vals" etc and
# perform near-neighbor interpolation by indexing
for attr in data.colnames:
vals = getattr(data, attr)
if vals is not None:
setattr(data, attr, vals[idxs])
bads = bads | data.bads
# Reject near-neighbor points more than max_gap secs from available data
max_gap = self.max_gaps.get(msidname, self.max_gap)
gap_bads = abs(data.times - times) > max_gap
if np.any(gap_bads):
print("Setting bads because of gaps in {} between {} to {}"
.format(msidname,
Time(times[gap_bads][0], format='unix').yday,
Time(times[gap_bads][-1], format='unix').yday))
bads = bads | gap_bads
dataset.times = times
dataset.bads = bads
dataset.indexes = indexes
return dataset
def __call__(self, start, stop):
dataset = fetch.MSIDset(self.rootparams, start, stop, filter_bad=True)
# Translate state codes "ON" and "OFF" to 1 and 0, respectively.
for data in dataset.values():
if (data.vals.dtype.name == 'string24'
and set(data.vals) == set(('ON ', 'OFF'))):
data.vals = np.where(data.vals == 'OFF', np.int8(0), np.int8(1))
dataset.interpolate(dt=self.time_step)
# Return calculated values. Np.asarray will copy the array only if
# dtype is not None and different from vals.dtype; otherwise a
# reference is returned.
vals = self.calc(dataset)
return np.asarray(vals, dtype=self.dtype)
@property
def mnf_step(self):
return int(round(self.time_step / MNF_TIME))
@property
def content(self):
return 'dp_{}{}'.format(self.content_root.lower(), self.mnf_step) | 0.695752 | 0.338952 |
from surprise.model_selection import GridSearchCV
def perform_grid_search(data, model_dict, param_grid, cv):
    """
    Run a cross-validated grid search for every model in ``model_dict``.

    :param data: Trainset to use in model training
    :param model_dict: Mapping of model name -> model class
    :param param_grid: Dictionary with algorithm parameters
    :param cv: Determines the cross-validation splitting. By default 5-fold.
    :return: List of (name, fitted GridSearchCV) tuples
    """
    fitted = []
    for model_name, model_cls in model_dict.items():
        search = GridSearchCV(model_cls, param_grid, cv=cv, n_jobs=-1)
        search.fit(data)
        print(f'Best parameters for model {model_name} are {search.best_params}')
        fitted.append((model_name, search))
    return fitted
def grid_knn(data, model_dict, k_list, min_k_list, similarities_list, user_based, cv=5):
    """
    Grid-search KNN algorithms and return the fitted searches.

    :param data: Trainset to use in model training
    :param model_dict: Mapping of model name -> KNN model class
    :param k_list: List of k values
    :param min_k_list: List of min_k values
    :param similarities_list: List of similarity measures
    :param user_based: Defines user-based or item-based approach
    :param cv: Determines the cross-validation splitting. By default 5-fold.
    :return: List of trained models
    """
    # Build the search space as a single literal.
    param_grid = {
        'k': k_list,
        'min_k': min_k_list,
        'sim_options': {
            'name': similarities_list,
            'user_based': [user_based],
        },
    }
    return perform_grid_search(data, model_dict, param_grid, cv)
def grid_matrix_fact(data, model_dict, n_epochs, n_factors, lr_all, reg_all, cv=5):
    """
    Grid-search matrix-factorization (SVD) algorithms and return the
    fitted searches.

    :param data: Trainset to use in model training
    :param model_dict: Mapping of model name -> model class
    :param n_epochs: List of n_epochs values
    :param n_factors: List of n_factors values
    :param lr_all: List of lr_all values
    :param reg_all: List of reg_all values
    :param cv: Determines the cross-validation splitting. By default 5-fold.
    :return: List of trained models
    """
    # Build the search space as a single literal.
    param_grid = {
        'n_epochs': n_epochs,
        'lr_all': lr_all,
        'n_factors': n_factors,
        'reg_all': reg_all,
    }
    return perform_grid_search(data, model_dict, param_grid, cv)
from surprise.model_selection import GridSearchCV
def perform_grid_search(data, model_dict, param_grid, cv):
"""
Return list of trained models in GridSearch
:param data: Trainset to use in model training
:param model_dict: Dictionary with list of models(and their names) to be used
:param param_grid: Dictionary with algorithm parameters
:param cv: Determines the cross-validation splitting. By default 5-fold.
:return List of trained models
"""
results_list = []
for name, model in model_dict.items():
grid = GridSearchCV(model, param_grid, cv=cv, n_jobs=-1)
grid.fit(data)
print(f'Best parameters for model {name} are {grid.best_params}')
results_list.append((name, grid))
return results_list
def grid_knn(data, model_dict, k_list, min_k_list, similarities_list, user_based, cv=5):
"""
Return list of trained models in GridSearch for KNN algorithms
:param data: Trainset to use in model training
:param model_dict: Dictionary with list of models(and their names) to be used
:param k_list: List of k values
:param min_k_list: List of min_k values
:param similarities_list: List of similarity measures
:param user_based: Defines user-based or item-based approach
:param cv: Determines the cross-validation splitting. By default 5-fold.
:return List of trained models
"""
# Define param grid
param_grid = dict()
param_grid['k'] = k_list
param_grid['min_k'] = min_k_list
param_grid['sim_options'] = dict()
param_grid['sim_options']['name'] = similarities_list
param_grid['sim_options']['user_based'] = [user_based]
results = perform_grid_search(data, model_dict, param_grid, cv)
return results
def grid_matrix_fact(data, model_dict, n_epochs, n_factors, lr_all, reg_all, cv=5):
"""
Return list of trained models in GridSearch for SVD algorithm
:param data: Trainset to use in model training
:param model_dict: Dictionary with list of models(and their names) to be used
:param n_epochs: List of n_epochs values
:param n_factors: List of n_factors values
:param lr_all: List of lr_all values
:param reg_all: List of reg_all values
:param cv: Determines the cross-validation splitting. By default 5-fold.
:return List of trained models
"""
# Define param grid
param_grid = dict()
param_grid['n_epochs'] = n_epochs
param_grid['lr_all'] = lr_all
param_grid['n_factors'] = n_factors
param_grid['reg_all'] = reg_all
results = perform_grid_search(data, model_dict, param_grid, cv)
return results | 0.821939 | 0.47859 |
import functools
import time

import spdlog as spd
from thrift.protocol import TBinaryProtocol
from thrift.transport import TSocket
from thrift.transport import TTransport

from buzzblog.gen import TLikeService
def instrumented(func):
    """Decorator that logs the wall-clock latency of a client RPC call.

    Writes request id, server address, function name and latency to the
    "logger" spdlog logger; a NotImplementedError from the logger lookup
    is ignored.  Fixes: apply functools.wraps so the wrapper keeps the
    wrapped method's name/docstring, and build the log message with
    implicit concatenation -- the previous backslash continuation inside
    the f-string embedded source indentation into the log line.
    """
    @functools.wraps(func)
    def func_wrapper(self, request_metadata, *args, **kwargs):
        start_time = time.monotonic()
        ret = func(self, request_metadata, *args, **kwargs)
        latency = time.monotonic() - start_time
        try:
            logger = spd.get("logger")
            logger.info(
                f'request_id={request_metadata.id} '
                f'server={self._ip_address}:{self._port} '
                f'function=like:{func.__name__} latency={latency:.9f}'
            )
        except NotImplementedError:
            pass
        return ret
    return func_wrapper
class Client:
    """Thrift client for the like service.

    Opens a buffered binary-protocol connection on construction and closes
    it via close(), context-manager exit, or garbage collection.  Each RPC
    method simply forwards to the generated TLikeService client and is
    latency-instrumented.
    """
    def __init__(self, ip_address, port, timeout=10000):
        """Connect to the like service at ip_address:port (timeout in ms)."""
        self._ip_address = ip_address
        self._port = port
        self._socket = TSocket.TSocket(ip_address, port)
        self._socket.setTimeout(timeout)
        self._transport = TTransport.TBufferedTransport(self._socket)
        self._protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
        self._tclient = TLikeService.Client(self._protocol)
        self._transport.open()
    def __enter__(self):
        return self
    def __exit__(self, exception_type, exception_value, exception_traceback):
        self.close()
    def __del__(self):
        self.close()
    def close(self):
        """Close the underlying transport if it is open.

        Idempotent, and safe even when __init__ failed before _transport
        was assigned (e.g. connection refused), in which case __del__
        previously raised AttributeError.
        """
        transport = getattr(self, "_transport", None)
        if transport is not None and transport.isOpen():
            transport.close()
    @instrumented
    def like_post(self, request_metadata, post_id):
        """Forward the like_post RPC."""
        return self._tclient.like_post(request_metadata=request_metadata,
            post_id=post_id)
    @instrumented
    def retrieve_standard_like(self, request_metadata, like_id):
        """Forward the retrieve_standard_like RPC."""
        return self._tclient.retrieve_standard_like(
            request_metadata=request_metadata, like_id=like_id)
    @instrumented
    def retrieve_expanded_like(self, request_metadata, like_id):
        """Forward the retrieve_expanded_like RPC."""
        return self._tclient.retrieve_expanded_like(
            request_metadata=request_metadata, like_id=like_id)
    @instrumented
    def delete_like(self, request_metadata, like_id):
        """Forward the delete_like RPC."""
        return self._tclient.delete_like(request_metadata=request_metadata,
            like_id=like_id)
    @instrumented
    def list_likes(self, request_metadata, query, limit, offset):
        """Forward the list_likes RPC (paginated by limit/offset)."""
        return self._tclient.list_likes(request_metadata=request_metadata,
            query=query, limit=limit, offset=offset)
    @instrumented
    def count_likes_by_account(self, request_metadata, account_id):
        """Forward the count_likes_by_account RPC."""
        return self._tclient.count_likes_by_account(
            request_metadata=request_metadata, account_id=account_id)
    @instrumented
    def count_likes_of_post(self, request_metadata, post_id):
        """Forward the count_likes_of_post RPC."""
        return self._tclient.count_likes_of_post(request_metadata=request_metadata,
            post_id=post_id)
import spdlog as spd
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from buzzblog.gen import TLikeService
def instrumented(func):
""" TODO : Method description """
def func_wrapper(self, request_metadata, *args, **kwargs):
start_time = time.monotonic()
ret = func(self, request_metadata, *args, **kwargs)
latency = time.monotonic() - start_time
try:
logger = spd.get("logger")
logger.info(f'request_id={request_metadata.id} server={self._ip_address}:{self._port} \
function=like:{func.__name__} latency={latency:.9f}')
except NotImplementedError:
pass
return ret
return func_wrapper
class Client:
""" TODO : Class description """
def __init__(self, ip_address, port, timeout=10000):
self._ip_address = ip_address
self._port = port
self._socket = TSocket.TSocket(ip_address, port)
self._socket.setTimeout(timeout)
self._transport = TTransport.TBufferedTransport(self._socket)
self._protocol = TBinaryProtocol.TBinaryProtocol(self._transport)
self._tclient = TLikeService.Client(self._protocol)
self._transport.open()
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, exception_traceback):
self.close()
def __del__(self):
self.close()
def close(self):
""" TODO : Method description """
if self._transport.isOpen():
self._transport.close()
@instrumented
def like_post(self, request_metadata, post_id):
""" TODO : Method description """
return self._tclient.like_post(request_metadata=request_metadata,
post_id=post_id)
@instrumented
def retrieve_standard_like(self, request_metadata, like_id):
""" TODO : Method description """
return self._tclient.retrieve_standard_like(
request_metadata=request_metadata, like_id=like_id)
@instrumented
def retrieve_expanded_like(self, request_metadata, like_id):
""" TODO : Method description """
return self._tclient.retrieve_expanded_like(
request_metadata=request_metadata, like_id=like_id)
@instrumented
def delete_like(self, request_metadata, like_id):
""" TODO : Method description """
return self._tclient.delete_like(request_metadata=request_metadata,
like_id=like_id)
@instrumented
def list_likes(self, request_metadata, query, limit, offset):
""" TODO : Method description """
return self._tclient.list_likes(request_metadata=request_metadata,
query=query, limit=limit, offset=offset)
@instrumented
def count_likes_by_account(self, request_metadata, account_id):
""" TODO : Method description """
return self._tclient.count_likes_by_account(
request_metadata=request_metadata, account_id=account_id)
@instrumented
def count_likes_of_post(self, request_metadata, post_id):
""" TODO : Method description """
return self._tclient.count_likes_of_post(request_metadata=request_metadata,
post_id=post_id) | 0.28398 | 0.071235 |
from __future__ import absolute_import, division, unicode_literals
from six import string_types
import param
import numpy as np
from bokeh.models import CustomJS
from bokeh.models.formatters import TickFormatter
from bokeh.models.widgets import (
DateSlider as _BkDateSlider, DateRangeSlider as _BkDateRangeSlider,
RangeSlider as _BkRangeSlider, Slider as _BkSlider)
from ..config import config
from ..io import state
from ..util import unicode_repr, value_as_datetime, value_as_date
from ..viewable import Layoutable
from .base import Widget, CompositeWidget
from ..layout import Column
from .input import StaticText
class _SliderBase(Widget):
    """Abstract base class holding the parameters shared by all sliders."""
    bar_color = param.Color(default="#e6e6e6", doc="""
        Color of the slider bar as a hexidecimal RGB value.""")
    direction = param.ObjectSelector(default='ltr', objects=['ltr', 'rtl'],
                                     doc="""
        Whether the slider should go from left-to-right ('ltr') or
        right-to-left ('rtl')""")
    orientation = param.ObjectSelector(default='horizontal',
                                       objects=['horizontal', 'vertical'], doc="""
        Whether the slider should be oriented horizontally or
        vertically.""")
    show_value = param.Boolean(default=True, doc="""
        Whether to show the widget value.""")
    tooltips = param.Boolean(default=True, doc="""
        Whether the slider handle should display tooltips.""")
    _widget_type = _BkSlider
    __abstract = True
class ContinuousSlider(_SliderBase):
    """Abstract base for sliders over a continuous numeric range.

    Adds a ``format`` parameter and support for static embedding by
    sampling the range into a DiscreteSlider.
    """
    format = param.ClassSelector(class_=string_types+(TickFormatter,), doc="""
        Allows defining a custom format string or bokeh TickFormatter.""")
    _supports_embed = True
    __abstract = True
    def __init__(self, **params):
        # Default the value to the slider start when not supplied.
        if 'value' not in params:
            params['value'] = params.get('start', self.start)
        super(ContinuousSlider, self).__init__(**params)
    def _get_embed_state(self, root, max_opts=3):
        """Swap this slider's model for a sampled DiscreteSlider with at
        most ``max_opts`` values so its states can be embedded statically."""
        ref = root.ref['id']
        w_model, parent = self._models[ref]
        _, _, doc, comm = state._views[ref]
        # Compute sampling
        start, end, step = w_model.start, w_model.end, w_model.step
        span = end-start
        dtype = int if isinstance(step, int) else float
        # Coarsen the step so no more than max_opts values are sampled.
        if (span/step) > (max_opts-1):
            step = dtype(span/(max_opts-1))
        vals = [dtype(v) for v in np.arange(start, end+step, step)]
        # Replace model
        layout_opts = {k: v for k, v in self.param.get_param_values()
                       if k in Layoutable.param and k != 'name'}
        dw = DiscreteSlider(options=vals, name=self.name, **layout_opts)
        dw.link(self, value='value')
        self._models.pop(ref)
        index = parent.children.index(w_model)
        with config.set(embed=True):
            w_model = dw._get_model(doc, root, parent, comm)
        # JS-side link -- presumably keeps the DiscreteSlider's label
        # (child 0) in sync with its slider (child 1); confirm against
        # DiscreteSlider._text_link.
        link = CustomJS(code=dw._jslink.code['value'], args={
            'source': w_model.children[1], 'target': w_model.children[0]})
        parent.children[index] = w_model
        w_model = w_model.children[1]
        w_model.js_on_change('value', link)
        return (dw, w_model, vals, lambda x: x.value, 'value', 'cb_obj.value')
class FloatSlider(ContinuousSlider):
    """Slider selecting a floating-point value in a continuous range."""
    start = param.Number(default=0.0)   # lower bound of the range
    end = param.Number(default=1.0)     # upper bound of the range
    value = param.Number(default=0.0)   # currently selected value
    value_throttled = param.Number(default=None)  # updated only on mouse-up
    step = param.Number(default=0.1)    # slider increment
class IntSlider(ContinuousSlider):
    """Slider selecting an integer value in a discrete numeric range."""
    value = param.Integer(default=0)
    value_throttled = param.Integer(default=None)  # updated only on mouse-up
    start = param.Integer(default=0)
    end = param.Integer(default=1)
    step = param.Integer(default=1)
    def _process_property_change(self, msg):
        """Coerce incoming bokeh property values to int."""
        # NOTE(review): super(_SliderBase, self) dispatches past
        # _SliderBase in the MRO, skipping any slider-level processing —
        # confirm the skip is intentional.
        msg = super(_SliderBase, self)._process_property_change(msg)
        if 'value' in msg:
            # bokeh reports floats; preserve None, otherwise truncate.
            msg['value'] = msg['value'] if msg['value'] is None else int(msg['value'])
        if 'value_throttled' in msg:
            throttled = msg['value_throttled']
            msg['value_throttled'] = throttled if throttled is None else int(throttled)
        return msg
class DateSlider(_SliderBase):
    """Slider selecting a date within a given date range."""
    value = param.Date(default=None)
    value_throttled = param.Date(default=None)  # updated only on mouse-up
    start = param.Date(default=None)
    end = param.Date(default=None)
    # Date values cannot be mapped through JS source transforms directly.
    _source_transforms = {'value': None, 'value_throttled': None, 'start': None, 'end': None}
    _widget_type = _BkDateSlider
    def __init__(self, **params):
        # Default the value to the slider start when not explicitly given.
        if 'value' not in params:
            params['value'] = params.get('start', self.start)
        super(DateSlider, self).__init__(**params)
    def _process_property_change(self, msg):
        """Convert incoming bokeh slider values into date objects."""
        # NOTE(review): super(_SliderBase, self) dispatches past
        # _SliderBase in the MRO — confirm the skip is intentional.
        msg = super(_SliderBase, self)._process_property_change(msg)
        if 'value' in msg:
            msg['value'] = value_as_date(msg['value'])
        if 'value_throttled' in msg:
            msg['value_throttled'] = value_as_date(msg['value_throttled'])
        return msg
class DiscreteSlider(CompositeWidget, _SliderBase):
    """Slider over an explicit list (or dict) of discrete options.

    Implemented as a composite of a StaticText label and an internal
    IntSlider over the option indices, kept in sync with ``value``.
    """
    options = param.ClassSelector(default=[], class_=(dict, list))
    value = param.Parameter()
    value_throttled = param.Parameter()
    formatter = param.String(default='%.3g')
    _source_transforms = {'value': None, 'value_throttled': None, 'options': None}
    # formatter is a Python-side option only; never synced to bokeh.
    _rename = {'formatter': None}
    _supports_embed = True
    # JS snippet keeping the text label in sync with the slider index.
    _text_link = """
    var labels = {labels}
    target.text = labels[source.value]
    """
    def __init__(self, **params):
        # Guard against ping-pong updates between value and the slider.
        self._syncing = False
        super(DiscreteSlider, self).__init__(**params)
        # Use an integer formatter when every option value is integral.
        if 'formatter' not in params and all(isinstance(v, (int, np.int_)) for v in self.values):
            self.formatter = '%d'
        if self.value is None and None not in self.values and self.options:
            self.value = self.values[0]
        elif self.value not in self.values:
            raise ValueError('Value %s not a valid option, '
                             'ensure that the supplied value '
                             'is one of the declared options.'
                             % self.value)
        self._text = StaticText(margin=(5, 0, 0, 5), style={'white-space': 'nowrap'})
        self._slider = None
        self._composite = Column(self._text, self._slider)
        self._update_options()
        self.param.watch(self._update_options, ['options', 'formatter'])
        self.param.watch(self._update_value, ['value'])
        self.param.watch(self._update_style, [p for p in Layoutable.param if p !='name'])
    def _update_options(self, *events):
        """Rebuild the internal IntSlider when options/formatter change."""
        values, labels = self.values, self.labels
        if self.value not in values:
            value = 0
            self.value = values[0]
        else:
            value = values.index(self.value)
        self._slider = IntSlider(
            start=0, end=len(self.options)-1, value=value, tooltips=False,
            show_value=False, margin=(0, 5, 5, 5), _supports_embed=False
        )
        self._update_style()
        js_code = self._text_link.format(
            labels='['+', '.join([unicode_repr(l) for l in labels])+']'
        )
        self._jslink = self._slider.jslink(self._text, code={'value': js_code})
        self._slider.param.watch(self._sync_value, 'value')
        self._text.value = labels[value]
        self._composite[1] = self._slider
    def _update_value(self, event):
        """Propagate a ``value`` change to the internal index slider."""
        values = self.values
        if self.value not in values:
            self.value = values[0]
            return
        index = self.values.index(self.value)
        if self._syncing:
            return
        try:
            self._syncing = True
            self._slider.value = index
        finally:
            self._syncing = False
    def _update_style(self, *events):
        """Forward layout options to the text, slider and composite."""
        style = {p: getattr(self, p) for p in Layoutable.param if p != 'name'}
        # Split the requested margin between the label (top/left) and the
        # slider (right/bottom/left) so the composite matches it.
        margin = style.pop('margin')
        if isinstance(margin, tuple):
            if len(margin) == 2:
                t = b = margin[0]
                r = l = margin[1]
            else:
                t, r, b, l = margin
        else:
            t = r = b = l = margin
        text_margin = (t, 0, 0, l)
        slider_margin = (0, r, b, l)
        self._text.param.set_param(
            margin=text_margin, **{k: v for k, v in style.items() if k != 'style'})
        self._slider.param.set_param(margin=slider_margin, **style)
        if self.width:
            style['width'] = self.width + l + r
        self._composite.param.set_param(**style)
    def _sync_value(self, event):
        """Propagate an internal slider move back to ``value``."""
        if self._syncing:
            return
        try:
            self._syncing = True
            self.value = self.values[event.new]
        finally:
            self._syncing = False
    def _get_embed_state(self, root, max_opts=3):
        # The embeddable model is the inner IntSlider (composite slot 1).
        model = self._composite[1]._models[root.ref['id']][0]
        return self, model, self.values, lambda x: x.value, 'value', 'cb_obj.value'
    @property
    def labels(self):
        """HTML labels (name prefix + bold option) for each option."""
        title = (self.name + ': ' if self.name else '')
        if isinstance(self.options, dict):
            return [title + ('<b>%s</b>' % o) for o in self.options]
        else:
            return [title + ('<b>%s</b>' % (o if isinstance(o, string_types) else (self.formatter % o)))
                    for o in self.options]
    @property
    def values(self):
        """Option values as a list (dict options use their values)."""
        return list(self.options.values()) if isinstance(self.options, dict) else self.options
class RangeSlider(_SliderBase):
    """Slider selecting a (low, high) numeric range."""
    format = param.ClassSelector(class_=string_types+(TickFormatter,), doc="""
        Allows defining a custom format string or bokeh TickFormatter.""")
    value = param.NumericTuple(default=(0, 1), length=2)
    value_throttled = param.NumericTuple(default=None, length=2)
    start = param.Number(default=0)
    end = param.Number(default=1)
    step = param.Number(default=0.1)
    _widget_type = _BkRangeSlider
    def __init__(self, **params):
        """Initialize the slider, defaulting value to (start, end)."""
        params.setdefault('value', (params.get('start', self.start),
                                    params.get('end', self.end)))
        super(RangeSlider, self).__init__(**params)
        # When every bound is an int (or None) and no explicit step was
        # supplied, snap the step to 1 so the slider moves in integers.
        bounds = (self.value[0], self.value[1], self.start, self.end)
        ints_only = all(b is None or isinstance(b, int) for b in bounds)
        if ints_only and 'step' not in params:
            self.step = 1
    def _process_property_change(self, msg):
        """Normalize list values coming from bokeh into tuples."""
        msg = super(RangeSlider, self)._process_property_change(msg)
        for key in ('value', 'value_throttled'):
            if key in msg:
                msg[key] = tuple(msg[key])
        return msg
class IntRangeSlider(RangeSlider):
    """Range slider constrained to integer endpoints."""
    start = param.Integer(default=0)
    end = param.Integer(default=1)
    step = param.Integer(default=1)
    def _process_property_change(self, msg):
        """Coerce incoming range endpoints to int (preserving None)."""
        # NOTE: super(RangeSlider, self) dispatches past RangeSlider's
        # own processing (which would only tuple the values) so the int
        # coercion below is the single conversion applied.
        msg = super(RangeSlider, self)._process_property_change(msg)
        for key in ('value', 'value_throttled'):
            if key in msg:
                msg[key] = tuple(None if v is None else int(v)
                                 for v in msg[key])
        return msg
class DateRangeSlider(_SliderBase):
    """Slider selecting a (start, end) date range."""
    value = param.Tuple(default=(None, None), length=2)
    value_throttled = param.Tuple(default=None, length=2)  # updated on mouse-up
    start = param.Date(default=None)
    end = param.Date(default=None)
    step = param.Number(default=1)
    # Date values cannot be mapped through JS source transforms directly.
    _source_transforms = {'value': None, 'value_throttled': None,
                          'start': None, 'end': None, 'step': None}
    _widget_type = _BkDateRangeSlider
    def __init__(self, **params):
        # Default the value to the (start, end) bounds when not given.
        if 'value' not in params:
            params['value'] = (params.get('start', self.start),
                               params.get('end', self.end))
        super(DateRangeSlider, self).__init__(**params)
    def _process_property_change(self, msg):
        """Convert incoming bokeh timestamp pairs into datetime pairs."""
        msg = super(DateRangeSlider, self)._process_property_change(msg)
        if 'value' in msg:
            v1, v2 = msg['value']
            msg['value'] = (value_as_datetime(v1), value_as_datetime(v2))
        if 'value_throttled' in msg:
            v1, v2 = msg['value_throttled']
            msg['value_throttled'] = (value_as_datetime(v1), value_as_datetime(v2))
return msg | panel/widgets/slider.py | from __future__ import absolute_import, division, unicode_literals
from six import string_types
import param
import numpy as np
from bokeh.models import CustomJS
from bokeh.models.formatters import TickFormatter
from bokeh.models.widgets import (
DateSlider as _BkDateSlider, DateRangeSlider as _BkDateRangeSlider,
RangeSlider as _BkRangeSlider, Slider as _BkSlider)
from ..config import config
from ..io import state
from ..util import unicode_repr, value_as_datetime, value_as_date
from ..viewable import Layoutable
from .base import Widget, CompositeWidget
from ..layout import Column
from .input import StaticText
class _SliderBase(Widget):
    """Shared parameters and widget mapping for all slider widgets."""
    bar_color = param.Color(default="#e6e6e6", doc="""
        Color of the slider bar as a hexidecimal RGB value.""")
    direction = param.ObjectSelector(default='ltr', objects=['ltr', 'rtl'],
                                     doc="""
        Whether the slider should go from left-to-right ('ltr') or
        right-to-left ('rtl')""")
    orientation = param.ObjectSelector(default='horizontal',
                                       objects=['horizontal', 'vertical'], doc="""
        Whether the slider should be oriented horizontally or
        vertically.""")
    show_value = param.Boolean(default=True, doc="""
        Whether to show the widget value.""")
    tooltips = param.Boolean(default=True, doc="""
        Whether the slider handle should display tooltips.""")
    _widget_type = _BkSlider
    # Abstract: only concrete subclasses declare the value parameters.
    __abstract = True
class ContinuousSlider(_SliderBase):
    """Abstract base class for sliders over a continuous numeric range.

    Concrete subclasses (e.g. FloatSlider, IntSlider) declare the
    ``start``/``end``/``value``/``step`` parameters.
    """
    format = param.ClassSelector(class_=string_types+(TickFormatter,), doc="""
        Allows defining a custom format string or bokeh TickFormatter.""")
    # Continuous sliders can be embedded by sampling discrete points.
    _supports_embed = True
    __abstract = True
    def __init__(self, **params):
        # Default the value to the slider start when not explicitly given.
        if 'value' not in params:
            params['value'] = params.get('start', self.start)
        super(ContinuousSlider, self).__init__(**params)
    def _get_embed_state(self, root, max_opts=3):
        """Swap this slider for a DiscreteSlider sampling at most
        *max_opts* values so its states can be statically embedded.

        Returns (widget, bokeh model, values, getter, parameter name,
        JS accessor) as consumed by the embed machinery.
        """
        ref = root.ref['id']
        w_model, parent = self._models[ref]
        _, _, doc, comm = state._views[ref]
        # Compute sampling: widen the step until at most max_opts values
        # fall inside the [start, end] range.
        start, end, step = w_model.start, w_model.end, w_model.step
        span = end-start
        dtype = int if isinstance(step, int) else float
        if (span/step) > (max_opts-1):
            step = dtype(span/(max_opts-1))
        vals = [dtype(v) for v in np.arange(start, end+step, step)]
        # Replace model: build a DiscreteSlider over the sampled values,
        # carrying over the layout options of this widget.
        layout_opts = {k: v for k, v in self.param.get_param_values()
                       if k in Layoutable.param and k != 'name'}
        dw = DiscreteSlider(options=vals, name=self.name, **layout_opts)
        dw.link(self, value='value')
        self._models.pop(ref)
        index = parent.children.index(w_model)
        with config.set(embed=True):
            w_model = dw._get_model(doc, root, parent, comm)
        # Re-wire the JS link between the inner slider (children[1]) and
        # the static text label (children[0]) of the composite model.
        link = CustomJS(code=dw._jslink.code['value'], args={
            'source': w_model.children[1], 'target': w_model.children[0]})
        parent.children[index] = w_model
        w_model = w_model.children[1]
        w_model.js_on_change('value', link)
        return (dw, w_model, vals, lambda x: x.value, 'value', 'cb_obj.value')
class FloatSlider(ContinuousSlider):
    """Slider selecting a floating-point value in a continuous range."""
    start = param.Number(default=0.0)   # lower bound of the range
    end = param.Number(default=1.0)     # upper bound of the range
    value = param.Number(default=0.0)   # currently selected value
    value_throttled = param.Number(default=None)  # updated only on mouse-up
    step = param.Number(default=0.1)    # slider increment
class IntSlider(ContinuousSlider):
    """Slider selecting an integer value in a discrete numeric range."""
    value = param.Integer(default=0)
    value_throttled = param.Integer(default=None)  # updated only on mouse-up
    start = param.Integer(default=0)
    end = param.Integer(default=1)
    step = param.Integer(default=1)
    def _process_property_change(self, msg):
        """Coerce incoming bokeh property values to int."""
        # NOTE(review): super(_SliderBase, self) dispatches past
        # _SliderBase in the MRO, skipping any slider-level processing —
        # confirm the skip is intentional.
        msg = super(_SliderBase, self)._process_property_change(msg)
        if 'value' in msg:
            # bokeh reports floats; preserve None, otherwise truncate.
            msg['value'] = msg['value'] if msg['value'] is None else int(msg['value'])
        if 'value_throttled' in msg:
            throttled = msg['value_throttled']
            msg['value_throttled'] = throttled if throttled is None else int(throttled)
        return msg
class DateSlider(_SliderBase):
    """Slider selecting a date within a given date range."""
    value = param.Date(default=None)
    value_throttled = param.Date(default=None)  # updated only on mouse-up
    start = param.Date(default=None)
    end = param.Date(default=None)
    # Date values cannot be mapped through JS source transforms directly.
    _source_transforms = {'value': None, 'value_throttled': None, 'start': None, 'end': None}
    _widget_type = _BkDateSlider
    def __init__(self, **params):
        # Default the value to the slider start when not explicitly given.
        if 'value' not in params:
            params['value'] = params.get('start', self.start)
        super(DateSlider, self).__init__(**params)
    def _process_property_change(self, msg):
        """Convert incoming bokeh slider values into date objects."""
        # NOTE(review): super(_SliderBase, self) dispatches past
        # _SliderBase in the MRO — confirm the skip is intentional.
        msg = super(_SliderBase, self)._process_property_change(msg)
        if 'value' in msg:
            msg['value'] = value_as_date(msg['value'])
        if 'value_throttled' in msg:
            msg['value_throttled'] = value_as_date(msg['value_throttled'])
        return msg
class DiscreteSlider(CompositeWidget, _SliderBase):
    """Slider over an explicit list (or dict) of discrete options.

    Implemented as a composite of a StaticText label and an internal
    IntSlider over the option indices, kept in sync with ``value``.
    """
    options = param.ClassSelector(default=[], class_=(dict, list))
    value = param.Parameter()
    value_throttled = param.Parameter()
    formatter = param.String(default='%.3g')
    _source_transforms = {'value': None, 'value_throttled': None, 'options': None}
    # formatter is a Python-side option only; never synced to bokeh.
    _rename = {'formatter': None}
    _supports_embed = True
    # JS snippet keeping the text label in sync with the slider index.
    _text_link = """
    var labels = {labels}
    target.text = labels[source.value]
    """
    def __init__(self, **params):
        # Guard against ping-pong updates between value and the slider.
        self._syncing = False
        super(DiscreteSlider, self).__init__(**params)
        # Use an integer formatter when every option value is integral.
        if 'formatter' not in params and all(isinstance(v, (int, np.int_)) for v in self.values):
            self.formatter = '%d'
        if self.value is None and None not in self.values and self.options:
            self.value = self.values[0]
        elif self.value not in self.values:
            raise ValueError('Value %s not a valid option, '
                             'ensure that the supplied value '
                             'is one of the declared options.'
                             % self.value)
        self._text = StaticText(margin=(5, 0, 0, 5), style={'white-space': 'nowrap'})
        self._slider = None
        self._composite = Column(self._text, self._slider)
        self._update_options()
        self.param.watch(self._update_options, ['options', 'formatter'])
        self.param.watch(self._update_value, ['value'])
        self.param.watch(self._update_style, [p for p in Layoutable.param if p !='name'])
    def _update_options(self, *events):
        """Rebuild the internal IntSlider when options/formatter change."""
        values, labels = self.values, self.labels
        if self.value not in values:
            value = 0
            self.value = values[0]
        else:
            value = values.index(self.value)
        self._slider = IntSlider(
            start=0, end=len(self.options)-1, value=value, tooltips=False,
            show_value=False, margin=(0, 5, 5, 5), _supports_embed=False
        )
        self._update_style()
        js_code = self._text_link.format(
            labels='['+', '.join([unicode_repr(l) for l in labels])+']'
        )
        self._jslink = self._slider.jslink(self._text, code={'value': js_code})
        self._slider.param.watch(self._sync_value, 'value')
        self._text.value = labels[value]
        self._composite[1] = self._slider
    def _update_value(self, event):
        """Propagate a ``value`` change to the internal index slider."""
        values = self.values
        if self.value not in values:
            self.value = values[0]
            return
        index = self.values.index(self.value)
        if self._syncing:
            return
        try:
            self._syncing = True
            self._slider.value = index
        finally:
            self._syncing = False
    def _update_style(self, *events):
        """Forward layout options to the text, slider and composite."""
        style = {p: getattr(self, p) for p in Layoutable.param if p != 'name'}
        # Split the requested margin between the label (top/left) and the
        # slider (right/bottom/left) so the composite matches it.
        margin = style.pop('margin')
        if isinstance(margin, tuple):
            if len(margin) == 2:
                t = b = margin[0]
                r = l = margin[1]
            else:
                t, r, b, l = margin
        else:
            t = r = b = l = margin
        text_margin = (t, 0, 0, l)
        slider_margin = (0, r, b, l)
        self._text.param.set_param(
            margin=text_margin, **{k: v for k, v in style.items() if k != 'style'})
        self._slider.param.set_param(margin=slider_margin, **style)
        if self.width:
            style['width'] = self.width + l + r
        self._composite.param.set_param(**style)
    def _sync_value(self, event):
        """Propagate an internal slider move back to ``value``."""
        if self._syncing:
            return
        try:
            self._syncing = True
            self.value = self.values[event.new]
        finally:
            self._syncing = False
    def _get_embed_state(self, root, max_opts=3):
        # The embeddable model is the inner IntSlider (composite slot 1).
        model = self._composite[1]._models[root.ref['id']][0]
        return self, model, self.values, lambda x: x.value, 'value', 'cb_obj.value'
    @property
    def labels(self):
        """HTML labels (name prefix + bold option) for each option."""
        title = (self.name + ': ' if self.name else '')
        if isinstance(self.options, dict):
            return [title + ('<b>%s</b>' % o) for o in self.options]
        else:
            return [title + ('<b>%s</b>' % (o if isinstance(o, string_types) else (self.formatter % o)))
                    for o in self.options]
    @property
    def values(self):
        """Option values as a list (dict options use their values)."""
        return list(self.options.values()) if isinstance(self.options, dict) else self.options
class RangeSlider(_SliderBase):
    """Slider selecting a (low, high) numeric range."""
    format = param.ClassSelector(class_=string_types+(TickFormatter,), doc="""
        Allows defining a custom format string or bokeh TickFormatter.""")
    value = param.NumericTuple(default=(0, 1), length=2)
    value_throttled = param.NumericTuple(default=None, length=2)
    start = param.Number(default=0)
    end = param.Number(default=1)
    step = param.Number(default=0.1)
    _widget_type = _BkRangeSlider
    def __init__(self, **params):
        """Initialize the slider, defaulting value to (start, end)."""
        params.setdefault('value', (params.get('start', self.start),
                                    params.get('end', self.end)))
        super(RangeSlider, self).__init__(**params)
        # When every bound is an int (or None) and no explicit step was
        # supplied, snap the step to 1 so the slider moves in integers.
        bounds = (self.value[0], self.value[1], self.start, self.end)
        ints_only = all(b is None or isinstance(b, int) for b in bounds)
        if ints_only and 'step' not in params:
            self.step = 1
    def _process_property_change(self, msg):
        """Normalize list values coming from bokeh into tuples."""
        msg = super(RangeSlider, self)._process_property_change(msg)
        for key in ('value', 'value_throttled'):
            if key in msg:
                msg[key] = tuple(msg[key])
        return msg
class IntRangeSlider(RangeSlider):
    """Range slider constrained to integer endpoints."""
    start = param.Integer(default=0)
    end = param.Integer(default=1)
    step = param.Integer(default=1)
    def _process_property_change(self, msg):
        """Coerce incoming range endpoints to int (preserving None)."""
        # NOTE: super(RangeSlider, self) dispatches past RangeSlider's
        # own processing (which would only tuple the values) so the int
        # coercion below is the single conversion applied.
        msg = super(RangeSlider, self)._process_property_change(msg)
        for key in ('value', 'value_throttled'):
            if key in msg:
                msg[key] = tuple(None if v is None else int(v)
                                 for v in msg[key])
        return msg
class DateRangeSlider(_SliderBase):
    """Slider selecting a (start, end) date range."""
    value = param.Tuple(default=(None, None), length=2)
    value_throttled = param.Tuple(default=None, length=2)  # updated on mouse-up
    start = param.Date(default=None)
    end = param.Date(default=None)
    step = param.Number(default=1)
    # Date values cannot be mapped through JS source transforms directly.
    _source_transforms = {'value': None, 'value_throttled': None,
                          'start': None, 'end': None, 'step': None}
    _widget_type = _BkDateRangeSlider
    def __init__(self, **params):
        # Default the value to the (start, end) bounds when not given.
        if 'value' not in params:
            params['value'] = (params.get('start', self.start),
                               params.get('end', self.end))
        super(DateRangeSlider, self).__init__(**params)
    def _process_property_change(self, msg):
        """Convert incoming bokeh timestamp pairs into datetime pairs."""
        msg = super(DateRangeSlider, self)._process_property_change(msg)
        if 'value' in msg:
            v1, v2 = msg['value']
            msg['value'] = (value_as_datetime(v1), value_as_datetime(v2))
        if 'value_throttled' in msg:
            v1, v2 = msg['value_throttled']
            msg['value_throttled'] = (value_as_datetime(v1), value_as_datetime(v2))
return msg | 0.706292 | 0.245209 |
from __future__ import annotations
from collections.abc import Callable
from typing import Any, cast
from pytradfri.command import Command
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .base_class import TradfriBaseEntity
from .const import (
ATTR_MODEL,
CONF_GATEWAY_ID,
COORDINATOR,
COORDINATOR_LIST,
DOMAIN,
KEY_API,
)
from .coordinator import TradfriDeviceDataUpdateCoordinator
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Load Tradfri covers based on a config entry."""
    gateway_id = config_entry.data[CONF_GATEWAY_ID]
    coordinator_data = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
    api = coordinator_data[KEY_API]
    # Create one cover entity for every coordinator whose device
    # exposes blind (cover) controls.
    async_add_entities(
        TradfriCover(
            device_coordinator,
            api,
            gateway_id,
        )
        for device_coordinator in coordinator_data[COORDINATOR_LIST]
        if device_coordinator.device.has_blind_control
    )
class TradfriCover(TradfriBaseEntity, CoverEntity):
    """The platform class required by Home Assistant."""
    def __init__(
        self,
        device_coordinator: TradfriDeviceDataUpdateCoordinator,
        api: Callable[[Command | list[Command]], Any],
        gateway_id: str,
    ) -> None:
        """Initialize a cover."""
        super().__init__(
            device_coordinator=device_coordinator,
            api=api,
            gateway_id=gateway_id,
        )
        self._device_control = self._device.blind_control
        self._device_data = self._device_control.blinds[0]
    def _refresh(self) -> None:
        """Refresh the device data from the coordinator."""
        self._device_data = self.coordinator.data.blind_control.blinds[0]
    @property
    def extra_state_attributes(self) -> dict[str, str] | None:
        """Return the state attributes."""
        return {ATTR_MODEL: self._device.device_info.model_number}
    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        if not self._device_data:
            return None
        # Tradfri positions run opposite to Home Assistant's convention,
        # hence the 100 - position mapping (also used in set_position).
        return 100 - cast(int, self._device_data.current_cover_position)
    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(100 - kwargs[ATTR_POSITION]))
    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(0))
    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(100))
    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.trigger_blind())
    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed or not."""
return self.current_cover_position == 0 | homeassistant/components/tradfri/cover.py | from __future__ import annotations
from collections.abc import Callable
from typing import Any, cast
from pytradfri.command import Command
from homeassistant.components.cover import ATTR_POSITION, CoverEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from .base_class import TradfriBaseEntity
from .const import (
ATTR_MODEL,
CONF_GATEWAY_ID,
COORDINATOR,
COORDINATOR_LIST,
DOMAIN,
KEY_API,
)
from .coordinator import TradfriDeviceDataUpdateCoordinator
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback,
) -> None:
    """Load Tradfri covers based on a config entry."""
    gateway_id = config_entry.data[CONF_GATEWAY_ID]
    coordinator_data = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
    api = coordinator_data[KEY_API]
    # Create one cover entity for every coordinator whose device
    # exposes blind (cover) controls.
    async_add_entities(
        TradfriCover(
            device_coordinator,
            api,
            gateway_id,
        )
        for device_coordinator in coordinator_data[COORDINATOR_LIST]
        if device_coordinator.device.has_blind_control
    )
class TradfriCover(TradfriBaseEntity, CoverEntity):
    """The platform class required by Home Assistant."""
    def __init__(
        self,
        device_coordinator: TradfriDeviceDataUpdateCoordinator,
        api: Callable[[Command | list[Command]], Any],
        gateway_id: str,
    ) -> None:
        """Initialize a cover."""
        super().__init__(
            device_coordinator=device_coordinator,
            api=api,
            gateway_id=gateway_id,
        )
        self._device_control = self._device.blind_control
        self._device_data = self._device_control.blinds[0]
    def _refresh(self) -> None:
        """Refresh the device data from the coordinator."""
        self._device_data = self.coordinator.data.blind_control.blinds[0]
    @property
    def extra_state_attributes(self) -> dict[str, str] | None:
        """Return the state attributes."""
        return {ATTR_MODEL: self._device.device_info.model_number}
    @property
    def current_cover_position(self) -> int | None:
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        if not self._device_data:
            return None
        # Tradfri positions run opposite to Home Assistant's convention,
        # hence the 100 - position mapping (also used in set_position).
        return 100 - cast(int, self._device_data.current_cover_position)
    async def async_set_cover_position(self, **kwargs: Any) -> None:
        """Move the cover to a specific position."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(100 - kwargs[ATTR_POSITION]))
    async def async_open_cover(self, **kwargs: Any) -> None:
        """Open the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(0))
    async def async_close_cover(self, **kwargs: Any) -> None:
        """Close the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.set_state(100))
    async def async_stop_cover(self, **kwargs: Any) -> None:
        """Stop the cover."""
        if not self._device_control:
            return
        await self._api(self._device_control.trigger_blind())
    @property
    def is_closed(self) -> bool:
        """Return if the cover is closed or not."""
return self.current_cover_position == 0 | 0.908355 | 0.116011 |
import time
from signal import signal, SIGSEGV
from types import MethodType
try:
import mathsat
from pysmt.solvers.msat import MathSAT5Solver
except ImportError:
class MathSAT5Solver:
pass
try:
from pysmt.solvers.z3 import Z3Solver
except ImportError:
class Z3Solver:
pass
from pysmt.environment import Environment as PysmtEnv
from pysmt.logics import Logic
from pysmt.exceptions import SolverReturnedUnknownResultError
from solver import get_solvers, Solver
from utils import log
class MultiSolver:
    """Keep multiple SMT solvers synchronised on the same assertions.

    Queries go to the currently active solver; when it times out,
    answers unknown or crashes with SIGSEGV, the next configured
    backend is tried on the same assertion stack.

    Use as a context manager so the SIGSEGV handler is installed and
    restored and all solvers are torn down on exit.
    """
    def __init__(self, env: PysmtEnv, timeout: int,
                 logic: Logic = None, log_lvl: int = 5,
                 pref_vars=None):
        """Create one synchronised solver per backend in get_solvers().

        env: pysmt environment owning every formula handled here.
        timeout: per-query timeout in seconds.
        logic: optional pysmt logic the solvers are restricted to.
        log_lvl: verbosity level forwarded to `log`.
        pref_vars: optional boolean variables the solvers should
            prefer to assign to true.
        """
        assert isinstance(env, PysmtEnv)
        assert isinstance(timeout, int)
        assert timeout >= 0
        assert logic is None or isinstance(logic, Logic)
        assert isinstance(log_lvl, int)
        self.log_lvl = log_lvl
        self.env = env
        self.logic = logic
        self.timeout = timeout
        self._segfault = False
        self._solvers = []
        for name in get_solvers():
            solver = Solver(env, name=name, logic=logic)
            _add_timeout_solve_method(solver, timeout)
            if pref_vars:
                _set_pref_vars(solver, pref_vars)
            assert hasattr(solver, "timeout_solve")
            self._solvers.append(solver)
        assert len(self._solvers) > 0
        self._solver_idx = 0
        self._is_timeout = False
        log(f"\tUsing solver: {get_solvers()[self._solver_idx]}",
            self.log_lvl)
    def _segfault_handler(self, signum=None, frame=None):
        """SIGSEGV handler: record that the active backend crashed.

        BUG FIX: Python invokes signal handlers as handler(signum,
        frame); the previous zero-argument signature raised a TypeError
        the moment the handler actually fired.
        """
        self._segfault = True
    def __enter__(self):
        self._segfault = False
        # Intercept segfaults raised inside native solver libraries so
        # the crashed backend can be replaced instead of killing us.
        self._prev_handler = signal(SIGSEGV, self._segfault_handler)
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        """Tear down every solver and restore the SIGSEGV handler."""
        for solver in self._solvers:
            solver.exit()
        signal(SIGSEGV, self._prev_handler)
        del self._solvers
    @property
    def _c_solver(self):
        """The currently active solver."""
        return self._solvers[self._solver_idx]
    @property
    def active_solver_name(self) -> str:
        """Name of the currently active solver."""
        return get_solvers()[self._solver_idx]
    def _next_solver(self) -> None:
        """Advance to the next backend, logging the switch."""
        self._solver_idx += 1
        log("\tNo other solvers" if self._solver_idx >= len(self._solvers)
            else f"\tUsing solver: {get_solvers()[self._solver_idx]}",
            self.log_lvl)
    @property
    def assertions(self):
        """Assertion stack of the currently active solver."""
        return self._c_solver.assertions
    def solve(self, assumptions=None):
        """Check satisfiability, falling back across backends on failure.

        Returns True/False on success, None when every backend failed
        without timing out; raises SolverReturnedUnknownResultError if
        some backend timed out and none succeeded.
        """
        assert assumptions is None or \
            all(s in self.env.formula_manager.formulae.values()
                for s in assumptions)
        res = None
        while self._solver_idx < len(self._solvers) and res is None:
            try:
                assert self._segfault is False
                res = self._c_solver.timeout_solve(assumptions=assumptions)
            except SolverReturnedUnknownResultError:
                # Solver could be in an undefined state: reset its
                # assertions so it stays usable later.
                self._c_solver.reset_assertions()
                self._is_timeout = True
                res = None
            if self._segfault is True:
                # The backend crashed: replace it with a fresh instance
                # and fall through to the next backend.
                self._segfault = False
                new_s = Solver(self.env, name=get_solvers()[self._solver_idx],
                               logic=self.logic)
                self._solvers[self._solver_idx] = new_s
                res = None
            if res is None:
                self._next_solver()
        return self._get_solve_result(res)
    def _get_solve_result(self, res):
        """Return res if the last solve query was successful;
        Raise exception if some query ended with timeout;
        Return None otherwise"""
        assert res is None or isinstance(res, bool)
        if res is not None:
            assert self._solver_idx < len(self._solvers)
            return res
        if self._solver_idx == len(self._solvers):
            # All backends failed: rewind so the instance stays usable.
            self._solver_idx = 0
        if self._is_timeout:
            self._is_timeout = False
            raise SolverReturnedUnknownResultError()
        return None
    def get_model(self):
        """Model of the last satisfiable query of the active solver."""
        return self._c_solver.get_model()
    def get_values(self, formulae):
        """Model values for the given formulae."""
        assert all(fm in self.env.formula_manager.formulae.values()
                   for fm in formulae)
        return self._c_solver.get_values(formulae)
    def push(self, levels=1):
        """Push `levels` backtracking points on the remaining solvers."""
        for s in self._solvers[self._solver_idx:]:
            s.push(levels=levels)
    def pop(self, levels=1):
        """Pop `levels` backtracking points from the remaining solvers."""
        for s in self._solvers[self._solver_idx:]:
            s.pop(levels=levels)
    def reset_assertions(self):
        """Clear the assertion stack of every solver and restart."""
        for s in self._solvers:
            s.reset_assertions()
        self._solver_idx = 0
        assert all(len(s.assertions) == 0 for s in self._solvers)
    def add_assertion(self, formula, named=None):
        """Assert `formula` on every solver to keep them synchronised."""
        assert formula in self.env.formula_manager.formulae.values()
        for s in self._solvers:
            s.add_assertion(formula, named=named)
    def add_assertions(self, formulae):
        """Assert each formula on every solver."""
        assert all(fm in self.env.formula_manager.formulae.values()
                   for fm in formulae)
        for s in self._solvers:
            s.add_assertions(formulae)
    def get_value(self, formula):
        """Model value of `formula` in the active solver."""
        assert formula in self.env.formula_manager.formulae.values()
        # BUG FIX: the result was computed but never returned.
        return self._c_solver.get_value(formula)
    def get_py_value(self, formula):
        """Python value of `formula` in the active solver's model."""
        assert formula in self.env.formula_manager.formulae.values()
        # BUG FIX: the result was computed but never returned.
        return self._c_solver.get_py_value(formula)
    def get_unsat_core(self):
        """Unsat core of the last unsatisfiable query."""
        # BUG FIX: the result was computed but never returned.
        return self._c_solver.get_unsat_core()
    def get_named_unsat_core(self):
        """Named unsat core of the last unsatisfiable query."""
        # BUG FIX: the result was computed but never returned.
        return self._c_solver.get_named_unsat_core()
def _add_timeout_solve_method(solver, secs: int) -> None:
    """Attach a ``timeout_solve`` method to *solver* enforcing *secs*."""
    if isinstance(solver, MathSAT5Solver):
        # MathSAT has no timeout option: install a termination test that
        # is polled during search and aborts once the wall-clock budget
        # is exhausted.
        def _msat_timeout_solve(self, assumptions=None):
            begin = time.time()
            calls = [0]
            def _should_stop():
                calls[0] += 1
                if calls[0] == 100:  # check the clock every 100 polls only
                    calls[0] = 0
                    return int(time.time() - begin > secs)
                return 0
            mathsat.msat_set_termination_test(solver.msat_env(), _should_stop)
            return self.solve(assumptions)
        solver.timeout_solve = MethodType(_msat_timeout_solve, solver)
    elif isinstance(solver, Z3Solver):
        # Z3 supports a native timeout (milliseconds): plain solve suffices.
        solver.z3.set(timeout=secs * 1000)
        solver.timeout_solve = solver.solve
    else:
        raise Exception("Timeout supported only for MathSAT and Z3.")
def _set_pref_vars(solver, pref_vars):
    """Bias *solver* towards assigning the given variables to true."""
    assert pref_vars is not None
    assert len(pref_vars) > 0
    if isinstance(solver, MathSAT5Solver):
        # MathSAT supports preferred variables natively.
        true = solver.env.formula_manager.TRUE()
        for v in pref_vars:
            solver.set_preferred_var(v, val=true)
    elif isinstance(solver, Z3Solver):
        # replace Z3Solver with Z3 Optimize in pysmt object; soft
        # constraints emulate the variable preference.
        from z3 import Optimize
        from pysmt.solvers.z3 import Z3Converter
        solver.z3 = Optimize()
        solver.converter = Z3Converter(solver.env, solver.z3.ctx)
        for s in pref_vars:
solver.z3.add_soft(solver.converter.convert(s)) | src/multisolver.py | import time
from signal import signal, SIGSEGV
from types import MethodType
try:
import mathsat
from pysmt.solvers.msat import MathSAT5Solver
except ImportError:
class MathSAT5Solver:
pass
try:
from pysmt.solvers.z3 import Z3Solver
except ImportError:
class Z3Solver:
pass
from pysmt.environment import Environment as PysmtEnv
from pysmt.logics import Logic
from pysmt.exceptions import SolverReturnedUnknownResultError
from solver import get_solvers, Solver
from utils import log
class MultiSolver:
"""Keep multiple solvers synchronised,
upon failure of one solver try using the following one"""
def __init__(self, env: PysmtEnv, timeout: int,
logic: Logic = None, log_lvl: int = 5,
pref_vars=None):
assert isinstance(env, PysmtEnv)
assert isinstance(timeout, int)
assert timeout >= 0
assert logic is None or isinstance(logic, Logic)
assert isinstance(log_lvl, int)
self.log_lvl = log_lvl
self.env = env
self.logic = logic
self.timeout = timeout
self._segfault = False
self._solvers = []
for name in get_solvers():
solver = Solver(env, name=name, logic=logic)
_add_timeout_solve_method(solver, timeout)
if pref_vars:
_set_pref_vars(solver, pref_vars)
assert hasattr(solver, "timeout_solve")
self._solvers.append(solver)
# self._solvers = [Solver(env, name=name, logic=logic)
# for name in get_solvers()]
assert len(self._solvers) > 0
self._solver_idx = 0
self._is_timeout = False
log(f"\tUsing solver: {get_solvers()[self._solver_idx]}",
self.log_lvl)
def _segfault_handler(self):
self._segfault = True
def __enter__(self):
self._segfault = False
self._prev_handler = signal(SIGSEGV, self._segfault_handler)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
for solver in self._solvers:
solver.exit()
signal(SIGSEGV, self._prev_handler)
del self._solvers
@property
def _c_solver(self):
return self._solvers[self._solver_idx]
@property
def active_solver_name(self) -> str:
return get_solvers()[self._solver_idx]
def _next_solver(self) -> None:
self._solver_idx += 1
log("\tNo other solvers" if self._solver_idx >= len(self._solvers)
else f"\tUsing solver: {get_solvers()[self._solver_idx]}",
self.log_lvl)
@property
def assertions(self):
return self._c_solver.assertions
def solve(self, assumptions=None):
assert assumptions is None or \
all(s in self.env.formula_manager.formulae.values()
for s in assumptions)
res = None
while self._solver_idx < len(self._solvers) and res is None:
try:
# res = solve_with_timeout(self.timeout, self._c_solver,
# assumptions=assumptions)
assert self._segfault is False
res = self._c_solver.timeout_solve(assumptions=assumptions)
except SolverReturnedUnknownResultError:
# solver could be in an undefined state, reset assertions.
self._c_solver.reset_assertions()
self._is_timeout = True
res = None
if self._segfault is True:
self._segfault = False
new_s = Solver(self.env, name=get_solvers()[self._solver_idx],
logic=self.logic)
self._solvers[self._solver_idx] = new_s
res = None
if res is None:
self._next_solver()
return self._get_solve_result(res)
def _get_solve_result(self, res):
    """Return `res` if the last solve query was successful.

    Raise SolverReturnedUnknownResultError if some query ended with a
    timeout; return None otherwise.
    """
    assert res is None or isinstance(res, bool)
    if res is not None:
        assert self._solver_idx < len(self._solvers)
        return res
    exhausted = self._solver_idx == len(self._solvers)
    if exhausted:
        # Every solver was tried; rewind for the next query.
        self._solver_idx = 0
        if self._is_timeout:
            self._is_timeout = False
            raise SolverReturnedUnknownResultError()
    return None
def get_model(self):
    """Model of the last satisfiable query, taken from the active solver."""
    solver = self._c_solver
    return solver.get_model()
def get_values(self, formulae):
    """Evaluate each formula in the current model of the active solver."""
    known = self.env.formula_manager.formulae.values()
    assert all(fm in known for fm in formulae)
    return self._c_solver.get_values(formulae)
def push(self, levels=1):
    """Open `levels` new backtracking points on the active and later solvers."""
    pending = self._solvers[self._solver_idx:]
    for solver in pending:
        solver.push(levels=levels)
def pop(self, levels=1):
    """Close `levels` backtracking points on the active and later solvers."""
    pending = self._solvers[self._solver_idx:]
    for solver in pending:
        solver.pop(levels=levels)
def reset_assertions(self):
    """Clear assertions on all wrapped solvers and rewind to the first one."""
    for solver in self._solvers:
        solver.reset_assertions()
    self._solver_idx = 0
    assert all(len(solver.assertions) == 0 for solver in self._solvers)
def add_assertion(self, formula, named=None):
    """Assert `formula` on every wrapped solver so they all stay in sync."""
    known = self.env.formula_manager.formulae.values()
    assert formula in known
    for solver in self._solvers:
        solver.add_assertion(formula, named=named)
def add_assertions(self, formulae):
    """Assert each formula on every wrapped solver."""
    known = self.env.formula_manager.formulae.values()
    assert all(fm in known for fm in formulae)
    for solver in self._solvers:
        solver.add_assertions(formulae)
def get_value(self, formula):
    """Return the model value of `formula` from the active solver.

    Bug fix: the original called the underlying solver but dropped the
    result, so this method always returned None.
    """
    assert formula in self.env.formula_manager.formulae.values()
    return self._c_solver.get_value(formula)
def get_py_value(self, formula):
    """Return the Python value of `formula` from the active solver.

    Bug fix: the original dropped the underlying solver's result and
    always returned None.
    """
    assert formula in self.env.formula_manager.formulae.values()
    return self._c_solver.get_py_value(formula)
def get_unsat_core(self):
    """Return the unsat core from the active solver.

    Bug fix: the original dropped the result and always returned None.
    """
    return self._c_solver.get_unsat_core()
def get_named_unsat_core(self):
    """Return the named unsat core from the active solver.

    Bug fix: the original dropped the result and always returned None.
    """
    return self._c_solver.get_named_unsat_core()
def _add_timeout_solve_method(solver, secs: int) -> None:
    """Attach a `timeout_solve` method to `solver` enforcing a wall-clock limit.

    MathSAT: installs a termination test that the solver polls; elapsed time
    is checked only every 100th call to keep the callback overhead low.
    Z3: uses the native per-solver timeout option (milliseconds).
    Raises Exception for any other backend.
    """
    if isinstance(solver, MathSAT5Solver):
        def solver_timeout_solve(self, assumptions=None):
            start = time.time()
            # Mutable cell so the nested callback can update the counter.
            count = [0]
            def ttest():
                count[0] += 1
                if count[0] == 100:
                    count[0] = 0
                    cur = time.time()
                    # Non-zero return tells MathSAT to abort the search.
                    return int(cur - start > secs)
                return 0
            mathsat.msat_set_termination_test(solver.msat_env(), ttest)
            return self.solve(assumptions)
        timeout_solve = MethodType(solver_timeout_solve, solver)
    elif isinstance(solver, Z3Solver):
        # Z3 expects the timeout in milliseconds.
        solver.z3.set(timeout=secs * 1000)
        timeout_solve = solver.solve
    else:
        raise Exception("Timeout supported only for MathSAT and Z3.")
    solver.timeout_solve = timeout_solve
def _set_pref_vars(solver, pref_vars):
    """Tell `solver` to prefer assigning the given variables to true.

    MathSAT supports preferred variables natively.  For Z3, the plain
    Solver is swapped for a z3 Optimize instance and each variable is added
    as a soft constraint.

    Fix: the last source line carried dataset extraction residue
    ("| 0.646906 | 0.35421") that broke the syntax; it has been removed.
    """
    assert pref_vars is not None
    assert len(pref_vars) > 0
    if isinstance(solver, MathSAT5Solver):
        true = solver.env.formula_manager.TRUE()
        for v in pref_vars:
            solver.set_preferred_var(v, val=true)
    elif isinstance(solver, Z3Solver):
        # replace Z3Solver with Z3 Optimize in pysmt object.
        from z3 import Optimize
        from pysmt.solvers.z3 import Z3Converter
        solver.z3 = Optimize()
        solver.converter = Z3Converter(solver.env, solver.z3.ctx)
        for s in pref_vars:
            solver.z3.add_soft(solver.converter.convert(s))
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# Fall back to a source checkout if spotpy is not installed.
try:
    import spotpy
except ImportError:
    import sys
    sys.path.append(".")
    import spotpy

from spotpy.examples.spot_setup_rosenbrock import spot_setup
from spotpy.describe import describe

# Run every SPOTPY algorithm on the Rosenbrock toy problem and collect the
# sampled results for a comparison plot at the end.
# Fix: the last line of this script carried dataset extraction residue
# fused onto it, which broke the syntax; it has been removed.
results = []
rep = 1000
timeout = 10  # Given in Seconds
parallel = "seq"
dbformat = "csv"

# Bayesian algorithms should be run with a likelihood function
bayesian_likelihood_func = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut

sampler = spotpy.algorithms.mc(spot_setup(),
    parallel=parallel, dbname='RosenMC', dbformat=dbformat, sim_timeout=timeout)
print(describe(sampler))
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.lhs(spot_setup(),
    parallel=parallel, dbname='RosenLHS', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.mle(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenMLE', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.sceua(spot_setup(),
    parallel=parallel, dbname='RosenSCEUA', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, ngs=4)
results.append(sampler.getdata())

sampler = spotpy.algorithms.sa(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenSA', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.rope(spot_setup(),
    parallel=parallel, dbname='RosenROPE', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.abc(spot_setup(),
    parallel=parallel, dbname='RosenABC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.fscabc(spot_setup(),
    parallel=parallel, dbname='RosenFSABC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.mcmc(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenMCMC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.demcz(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenDEMCz', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, nChains=4)
results.append(sampler.getdata())

sampler = spotpy.algorithms.dream(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenDREAM', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, nChains=4)
results.append(sampler.getdata())

print(results[0].dtype)  # Check for Travis: Get the last sampled parameter for x
evaluation = spot_setup().evaluation()

# Example how to plot the data
algorithms = ['mc', 'lhs', 'mle', 'sceua', 'sa', 'rope', 'abc', 'fscabc', 'mcmc', 'demcz', 'dream']
spotpy.analyser.plot_parametertrace_algorithms(results, algorithms, spot_setup())
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

# Fall back to a source checkout if spotpy is not installed.
try:
    import spotpy
except ImportError:
    import sys
    sys.path.append(".")
    import spotpy

from spotpy.examples.spot_setup_rosenbrock import spot_setup
from spotpy.describe import describe

# Run every SPOTPY algorithm on the Rosenbrock toy problem and collect the
# sampled results for a comparison plot at the end.
# Fix: this copy was a dataset "parsed_code" duplicate; its first import was
# fused onto the previous row (restored here) and its last line carried
# extraction residue ("| 0.557364 | 0.092033"), now removed.
results = []
rep = 1000
timeout = 10  # Given in Seconds
parallel = "seq"
dbformat = "csv"

# Bayesian algorithms should be run with a likelihood function
bayesian_likelihood_func = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut

sampler = spotpy.algorithms.mc(spot_setup(),
    parallel=parallel, dbname='RosenMC', dbformat=dbformat, sim_timeout=timeout)
print(describe(sampler))
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.lhs(spot_setup(),
    parallel=parallel, dbname='RosenLHS', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.mle(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenMLE', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.sceua(spot_setup(),
    parallel=parallel, dbname='RosenSCEUA', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, ngs=4)
results.append(sampler.getdata())

sampler = spotpy.algorithms.sa(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenSA', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.rope(spot_setup(),
    parallel=parallel, dbname='RosenROPE', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.abc(spot_setup(),
    parallel=parallel, dbname='RosenABC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.fscabc(spot_setup(),
    parallel=parallel, dbname='RosenFSABC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.mcmc(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenMCMC', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep)
results.append(sampler.getdata())

sampler = spotpy.algorithms.demcz(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenDEMCz', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, nChains=4)
results.append(sampler.getdata())

sampler = spotpy.algorithms.dream(spot_setup(obj_func=bayesian_likelihood_func),
    parallel=parallel, dbname='RosenDREAM', dbformat=dbformat, sim_timeout=timeout)
sampler.sample(rep, nChains=4)
results.append(sampler.getdata())

print(results[0].dtype)  # Check for Travis: Get the last sampled parameter for x
evaluation = spot_setup().evaluation()

# Example how to plot the data
algorithms = ['mc', 'lhs', 'mle', 'sceua', 'sa', 'rope', 'abc', 'fscabc', 'mcmc', 'demcz', 'dream']
spotpy.analyser.plot_parametertrace_algorithms(results, algorithms, spot_setup())
import logging
from unittest.mock import patch
import pandas as pd
import pytest
import awswrangler as wr
from ._utils import ensure_athena_query_metadata
# Verbose awswrangler logging helps diagnose cache-behaviour failures in CI.
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def test_athena_cache(path, glue_database, glue_table, workgroup1):
    """Reading with and without a valid cache window returns the same data."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    # max_cache_seconds=1: cache is effectively disabled for this first read.
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=1, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
    # max_cache_seconds=900: the previous query can now be served from cache.
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
    # Chunked reads must split the two rows into two chunks.
    dfs = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, workgroup=workgroup1, chunksize=1
    )
    assert len(list(dfs)) == 2
@pytest.mark.parametrize("data_source", [None, "AwsDataCatalog"])
def test_cache_query_ctas_approach_true(path, glue_database, glue_table, data_source):
    """With ctas_approach=True, a cached query must be reused instead of re-resolved."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(
        df=df,
        path=path,
        dataset=True,
        mode="overwrite",
        database=glue_database,
        table=glue_table,
        description="c0",
        parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))},
        columns_comments={"c0": "0"},
    )
    # First read: force a cache miss so the query actually executes.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=True, max_cache_seconds=0, data_source=data_source
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Second read: cache window open, so the query must NOT be re-resolved.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=True, max_cache_seconds=900, data_source=data_source
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        ensure_athena_query_metadata(df=df3, ctas_approach=True, encrypted=False)
@pytest.mark.parametrize("data_source", [None, "AwsDataCatalog"])
def test_cache_query_ctas_approach_false(path, glue_database, glue_table, data_source):
    """With ctas_approach=False, a cached query must be reused instead of re-resolved."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(
        df=df,
        path=path,
        dataset=True,
        mode="overwrite",
        database=glue_database,
        table=glue_table,
        description="c0",
        parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))},
        columns_comments={"c0": "0"},
    )
    # First read: force a cache miss so the query actually executes.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=False, max_cache_seconds=0, data_source=data_source
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Second read: cache window open, so the query must NOT be re-resolved.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, data_source=data_source
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        ensure_athena_query_metadata(df=df3, ctas_approach=False, encrypted=False)
def test_cache_query_semicolon(path, glue_database, glue_table):
    """A trailing semicolon must not prevent a cache hit for the same query."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    # First read without a semicolon, cache disabled -> real execution.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Same query with a trailing ';' must be served from the cache.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table};", database=glue_database, ctas_approach=True, max_cache_seconds=900
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
def test_local_cache(path, glue_database, glue_table):
    """With max_local_cache_entries=1, the older entry is evicted by the newer one."""
    wr.config.max_local_cache_entries = 1
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
        first_query_id = df2.query_metadata["QueryExecutionId"]
        assert first_query_id in wr.athena._read._cache_manager
        df3 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        second_query_id = df3.query_metadata["QueryExecutionId"]
        # The single-entry local cache can only hold the most recent query.
        assert first_query_id not in wr.athena._read._cache_manager
        assert second_query_id in wr.athena._read._cache_manager
def test_paginated_remote_cache(path, glue_database, glue_table, workgroup1):
    """A large remote cache window still returns correct results.

    Fix: the last line carried dataset extraction residue fused onto it,
    which broke the syntax; it has been removed.
    """
    wr.config.max_remote_cache_entries = 100
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=1, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
from unittest.mock import patch
import pandas as pd
import pytest
import awswrangler as wr
from ._utils import ensure_athena_query_metadata
# Verbose awswrangler logging helps diagnose cache-behaviour failures in CI.
logging.getLogger("awswrangler").setLevel(logging.DEBUG)
def test_athena_cache(path, glue_database, glue_table, workgroup1):
    """Reading with and without a valid cache window returns the same data."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    # max_cache_seconds=1: cache is effectively disabled for this first read.
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=1, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
    # max_cache_seconds=900: the previous query can now be served from cache.
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
    # Chunked reads must split the two rows into two chunks.
    dfs = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, workgroup=workgroup1, chunksize=1
    )
    assert len(list(dfs)) == 2
@pytest.mark.parametrize("data_source", [None, "AwsDataCatalog"])
def test_cache_query_ctas_approach_true(path, glue_database, glue_table, data_source):
    """With ctas_approach=True, a cached query must be reused instead of re-resolved."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(
        df=df,
        path=path,
        dataset=True,
        mode="overwrite",
        database=glue_database,
        table=glue_table,
        description="c0",
        parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))},
        columns_comments={"c0": "0"},
    )
    # First read: force a cache miss so the query actually executes.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=True, max_cache_seconds=0, data_source=data_source
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Second read: cache window open, so the query must NOT be re-resolved.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=True, max_cache_seconds=900, data_source=data_source
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        ensure_athena_query_metadata(df=df3, ctas_approach=True, encrypted=False)
@pytest.mark.parametrize("data_source", [None, "AwsDataCatalog"])
def test_cache_query_ctas_approach_false(path, glue_database, glue_table, data_source):
    """With ctas_approach=False, a cached query must be reused instead of re-resolved."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(
        df=df,
        path=path,
        dataset=True,
        mode="overwrite",
        database=glue_database,
        table=glue_table,
        description="c0",
        parameters={"num_cols": str(len(df.columns)), "num_rows": str(len(df.index))},
        columns_comments={"c0": "0"},
    )
    # First read: force a cache miss so the query actually executes.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=False, max_cache_seconds=0, data_source=data_source
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Second read: cache window open, so the query must NOT be re-resolved.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_table(
            glue_table, glue_database, ctas_approach=False, max_cache_seconds=900, data_source=data_source
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        ensure_athena_query_metadata(df=df3, ctas_approach=False, encrypted=False)
def test_cache_query_semicolon(path, glue_database, glue_table):
    """A trailing semicolon must not prevent a cache hit for the same query."""
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    # First read without a semicolon, cache disabled -> real execution.
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
    # Same query with a trailing ';' must be served from the cache.
    with patch("awswrangler.athena._read._resolve_query_without_cache") as resolve_no_cache:
        df3 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table};", database=glue_database, ctas_approach=True, max_cache_seconds=900
        )
        resolve_no_cache.assert_not_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
def test_local_cache(path, glue_database, glue_table):
    """With max_local_cache_entries=1, the older entry is evicted by the newer one."""
    wr.config.max_local_cache_entries = 1
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    with patch(
        "awswrangler.athena._read._check_for_cached_results",
        return_value=wr.athena._read._CacheInfo(has_valid_cache=False),
    ) as mocked_cache_attempt:
        df2 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df2.shape
        assert df.c0.sum() == df2.c0.sum()
        first_query_id = df2.query_metadata["QueryExecutionId"]
        assert first_query_id in wr.athena._read._cache_manager
        df3 = wr.athena.read_sql_query(
            f"SELECT * FROM {glue_table}", database=glue_database, ctas_approach=True, max_cache_seconds=0
        )
        mocked_cache_attempt.assert_called()
        assert df.shape == df3.shape
        assert df.c0.sum() == df3.c0.sum()
        second_query_id = df3.query_metadata["QueryExecutionId"]
        # The single-entry local cache can only hold the most recent query.
        assert first_query_id not in wr.athena._read._cache_manager
        assert second_query_id in wr.athena._read._cache_manager
def test_paginated_remote_cache(path, glue_database, glue_table, workgroup1):
    """A large remote cache window still returns correct results.

    Fix: the last line carried dataset extraction residue
    ("| 0.587707 | 0.434041") fused onto it, breaking the syntax; removed.
    """
    wr.config.max_remote_cache_entries = 100
    df = pd.DataFrame({"c0": [0, None]}, dtype="Int64")
    wr.s3.to_parquet(df=df, path=path, dataset=True, mode="overwrite", database=glue_database, table=glue_table)
    df2 = wr.athena.read_sql_table(
        glue_table, glue_database, ctas_approach=False, max_cache_seconds=1, workgroup=workgroup1
    )
    assert df.shape == df2.shape
    assert df.c0.sum() == df2.c0.sum()
from typing import List, NamedTuple
from genyrator import Entity
import genyrator.entities.Template as Template
from genyrator.entities.Template import create_template
from genyrator.types import TypeOption
class TemplateConfig(NamedTuple):
    """Groups the generated templates by output area of a module."""
    root_files: List[Template.Template]
    core: List[Template.Template]
    db_init: List[Template.Template]
    db_models: List[Template.Template]
    fixtures: List[Template.Template]
    domain_models: List[Template.Template]
    resources: List[Template.Template]
def create_template_config(
        module_name: str,
        db_import_path: str,
        entities: List[Entity],
        api_name: str,
        api_description: str,
) -> TemplateConfig:
    """Assemble every template needed to generate a module from `entities`.

    Args:
        module_name:     Name of the generated Python module.
        db_import_path:  Import path of the SQLAlchemy db object.
        entities:        Entities to generate models, fixtures and resources for.
        api_name:        Name passed to the generated restplus API.
        api_description: Description passed to the generated restplus API.

    Returns:
        A TemplateConfig grouping the templates by output area.

    Fix: the closing line carried dataset extraction residue fused onto it,
    which broke the syntax; it has been removed.
    """
    # Top-level package files (__init__ and config).
    root_files = [
        create_template(
            Template.RootInit, ['__init__'], module_name=module_name, db_import_path=db_import_path,
        ),
        create_template(Template.Config, ['config'], module_name=module_name),
    ]
    # Shared helper modules.
    core_files = [
        create_template(Template.Template, ['core', 'convert_case']),
        create_template(Template.ConvertDict, ['core', 'convert_dict'], module_name=module_name),
    ]
    db_init = [
        create_template(Template.Template, ['sqlalchemy', '__init__']),
    ]
    # One SQLAlchemy model per entity, plus shared conversion helpers.
    db_models = [
        *[create_template(
            Template.SQLAlchemyModel, ['sqlalchemy', 'model', 'sqlalchemy_model'],
            db_import_path=db_import_path, entity=e, module_name=module_name,
            out_path=Template.OutPath((['sqlalchemy', 'model'], e.class_name))
        ) for e in entities],
        create_template(
            Template.SQLAlchemyModelInit, ['sqlalchemy', 'model', '__init__'], db_import_path=db_import_path,
            imports=[Template.Import(e.class_name, [e.class_name]) for e in entities], module_name=module_name,
        ),
        create_template(Template.ModelToDict, ['sqlalchemy', 'model_to_dict'], module_name=module_name),
        create_template(Template.ConvertProperties, ['sqlalchemy', 'convert_properties'], module_name=module_name),
        create_template(Template.ConvertModels, ['sqlalchemy', 'convert_between_models'], module_name=module_name),
        create_template(Template.JoinEntities, ['sqlalchemy', 'join_entities'], module_name=module_name),
        create_template(Template.Template, ['sqlalchemy', 'model', 'types']),
        create_template(
            Template.ConvertDictToMarshmallow,
            ['sqlalchemy', 'convert_dict_to_marshmallow_result'],
            module_name=module_name, db_import_path=db_import_path,
        ),
    ]
    # Test fixtures, one per entity.
    fixtures = [
        create_template(Template.Template, ['sqlalchemy', 'fixture', '__init__']),
        *[create_template(
            Template.Fixture, ['sqlalchemy', 'fixture', 'fixture'], module_name=module_name,
            db_import_path=db_import_path, out_path=Template.OutPath((['sqlalchemy', 'fixture'], entity.class_name)),
            entity=entity,
        ) for entity in entities]
    ]
    domain_models = [
        create_template(Template.Template, ['domain', 'types']),
        *[create_template(
            Template.DomainModel, ['domain', 'domain_model'], module_name=module_name, entity=entity,
            out_path=Template.OutPath((['domain'], entity.class_name))
        ) for entity in entities]
    ]
    # REST resources: schema, one resource per entity, and the package init.
    resources = [
        create_template(
            Template.RootSchema, ['schema'], module_name=module_name, entities=entities
        ),
        *[create_template(
            Template.Resource, ['resources', 'resource'],
            entity=entity, out_path=Template.OutPath((['resources'], entity.class_name)),
            db_import_path=db_import_path, module_name=module_name,
            restplus_template=create_template(
                Template.RestplusModel, ['resources', 'restplus_model'], entity=entity
            ).render(),
            TypeOption=TypeOption,
        ) for entity in entities],
        create_template(
            Template.ResourcesInit, ['resources', '__init__'], entities=entities,
            module_name=module_name, api_name=api_name, api_description=api_description,
        ),
    ]
    return TemplateConfig(
        root_files=root_files,
        core=core_files,
        db_init=db_init,
        db_models=db_models,
        fixtures=fixtures,
        domain_models=domain_models,
        resources=resources,
    )
from genyrator import Entity
import genyrator.entities.Template as Template
from genyrator.entities.Template import create_template
from genyrator.types import TypeOption
class TemplateConfig(NamedTuple):
    """Groups the generated templates by output area of a module."""
    root_files: List[Template.Template]
    core: List[Template.Template]
    db_init: List[Template.Template]
    db_models: List[Template.Template]
    fixtures: List[Template.Template]
    domain_models: List[Template.Template]
    resources: List[Template.Template]
def create_template_config(
        module_name: str,
        db_import_path: str,
        entities: List[Entity],
        api_name: str,
        api_description: str,
) -> TemplateConfig:
    """Assemble every template needed to generate a module from `entities`.

    Args:
        module_name:     Name of the generated Python module.
        db_import_path:  Import path of the SQLAlchemy db object.
        entities:        Entities to generate models, fixtures and resources for.
        api_name:        Name passed to the generated restplus API.
        api_description: Description passed to the generated restplus API.

    Returns:
        A TemplateConfig grouping the templates by output area.

    Fix: the closing line carried dataset extraction residue
    ("| 0.536313 | 0.123207") fused onto it, breaking the syntax; removed.
    """
    # Top-level package files (__init__ and config).
    root_files = [
        create_template(
            Template.RootInit, ['__init__'], module_name=module_name, db_import_path=db_import_path,
        ),
        create_template(Template.Config, ['config'], module_name=module_name),
    ]
    # Shared helper modules.
    core_files = [
        create_template(Template.Template, ['core', 'convert_case']),
        create_template(Template.ConvertDict, ['core', 'convert_dict'], module_name=module_name),
    ]
    db_init = [
        create_template(Template.Template, ['sqlalchemy', '__init__']),
    ]
    # One SQLAlchemy model per entity, plus shared conversion helpers.
    db_models = [
        *[create_template(
            Template.SQLAlchemyModel, ['sqlalchemy', 'model', 'sqlalchemy_model'],
            db_import_path=db_import_path, entity=e, module_name=module_name,
            out_path=Template.OutPath((['sqlalchemy', 'model'], e.class_name))
        ) for e in entities],
        create_template(
            Template.SQLAlchemyModelInit, ['sqlalchemy', 'model', '__init__'], db_import_path=db_import_path,
            imports=[Template.Import(e.class_name, [e.class_name]) for e in entities], module_name=module_name,
        ),
        create_template(Template.ModelToDict, ['sqlalchemy', 'model_to_dict'], module_name=module_name),
        create_template(Template.ConvertProperties, ['sqlalchemy', 'convert_properties'], module_name=module_name),
        create_template(Template.ConvertModels, ['sqlalchemy', 'convert_between_models'], module_name=module_name),
        create_template(Template.JoinEntities, ['sqlalchemy', 'join_entities'], module_name=module_name),
        create_template(Template.Template, ['sqlalchemy', 'model', 'types']),
        create_template(
            Template.ConvertDictToMarshmallow,
            ['sqlalchemy', 'convert_dict_to_marshmallow_result'],
            module_name=module_name, db_import_path=db_import_path,
        ),
    ]
    # Test fixtures, one per entity.
    fixtures = [
        create_template(Template.Template, ['sqlalchemy', 'fixture', '__init__']),
        *[create_template(
            Template.Fixture, ['sqlalchemy', 'fixture', 'fixture'], module_name=module_name,
            db_import_path=db_import_path, out_path=Template.OutPath((['sqlalchemy', 'fixture'], entity.class_name)),
            entity=entity,
        ) for entity in entities]
    ]
    domain_models = [
        create_template(Template.Template, ['domain', 'types']),
        *[create_template(
            Template.DomainModel, ['domain', 'domain_model'], module_name=module_name, entity=entity,
            out_path=Template.OutPath((['domain'], entity.class_name))
        ) for entity in entities]
    ]
    # REST resources: schema, one resource per entity, and the package init.
    resources = [
        create_template(
            Template.RootSchema, ['schema'], module_name=module_name, entities=entities
        ),
        *[create_template(
            Template.Resource, ['resources', 'resource'],
            entity=entity, out_path=Template.OutPath((['resources'], entity.class_name)),
            db_import_path=db_import_path, module_name=module_name,
            restplus_template=create_template(
                Template.RestplusModel, ['resources', 'restplus_model'], entity=entity
            ).render(),
            TypeOption=TypeOption,
        ) for entity in entities],
        create_template(
            Template.ResourcesInit, ['resources', '__init__'], entities=entities,
            module_name=module_name, api_name=api_name, api_description=api_description,
        ),
    ]
    return TemplateConfig(
        root_files=root_files,
        core=core_files,
        db_init=db_init,
        db_models=db_models,
        fixtures=fixtures,
        domain_models=domain_models,
        resources=resources,
    )
from flask import Flask, render_template, request, redirect, url_for, session
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
from azure.storage.blob import ContentSettings
import mysql.connector
from mysql.connector import errorcode
import os
import csv
import random
from datetime import datetime
# Flask application instance.
# NOTE(review): no app.secret_key is set anywhere in this file, yet routes
# below write to `session` — Flask sessions will fail at runtime unless the
# key is configured elsewhere; confirm.
app = Flask(__name__)
# clone successful
# Connection settings for the Azure Database for MySQL server.
# NOTE(review): credentials (including the password placeholder) are
# hard-coded in source — load them from environment variables or a secrets
# store before deploying.
config = {
    'host':'myserver-mysql-ashu.mysql.database.azure.com',
    'user':'root123@myserver-mysql-ashu',
    'password':'<PASSWORD>',
    'database':'mysqlashudb',
    'ssl_ca':'BaltimoreCyberTrustRoot.crt.pem'
}
#block_blob_service = BlockBlobService(account_name='ashuazurestorage', account_key='<KEY>')
#block_blob_service.set_container_acl('ashu-blob-container', public_access=PublicAccess.Container)
@app.route('/')
def index():
    """Redirect the bare root URL to the login page."""
    return redirect(url_for('login'))
@app.route('/login', methods=['POST', 'GET'])
def login():
    """Log a user in by name and redirect to the dashboard; GET shows the form.

    NOTE(review): no password is checked — any submitted username is accepted.
    NOTE(review): storing a datetime object in the cookie-backed session may
    fail to serialize; verify against the session interface in use.
    """
    if request.method == 'POST':
        username = request.form['username']
        session['logged_in'] = True
        session['username'] = username
        time_start = datetime.now()
        # Presumably kept to measure session duration later — confirm usage.
        session['time'] = time_start
        return redirect(url_for('dashboard'))
    return render_template('login.html')
@app.route('/randomQueries')
def randomQueries():
    """Render the random-queries landing page."""
    return render_template('randomQueries.html')
@app.route('/dashboard')
def dashboard():
    """Render the dashboard page."""
    return render_template('dashboard.html')
@app.route('/randomqueriesupto1000', methods=['GET','POST'])
def randomQueriesUpto1000():
    """Run N COUNT queries against the earthquake table and report elapsed time.

    GET renders the form; POST runs `ChooseNumber` queries, stepping the
    queried magnitude by 0.01 per iteration starting at 0.5.
    """
    if request.method == 'POST':
        number = request.form['ChooseNumber']
        print("number choosen %s" %(number))
        try:
            conn = mysql.connector.connect(**config)
            print("Connection established")
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with the user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)
        else:
            cursor = conn.cursor()
            time_start = datetime.now()
            num = 0.5
            number = int(number)
            for i in range(0,number):
                # NOTE(review): randomnum is computed but only referenced by
                # the commented-out range query below.
                randomnum = random.uniform(0,1)
                #print("random number %s" %(randomnum))
                #result = cursor.execute("SELECT mag FROM earthquake_table WHERE mag >= %s AND mag <= %s ;", (randomnum,randomnum+1.0))
                result = cursor.execute("SELECT COUNT(mag) FROM earthquake_table WHERE mag = %s ;", [num])
                articles = cursor.fetchone()
                num = num + 0.01
                num = round(num, 2)
                print(num)
                print(articles)
            cursor.close()
            conn.close()
            time_end = datetime.now()
            time_diff = time_end - time_start
            return render_template('complete.html', time_diff=time_diff)
    return render_template('Randomqueriesupto1000.html')
@app.route('/restrictedQueries')
def restrictedQueries():
    """Render the restricted-queries landing page."""
    return render_template('restrictedQueries.html')
@app.route('/searchWithinDistance', methods=['GET','POST'])
def searchWithinDistance():
    """Find earthquakes within `distance` of a lat/long (POST) or show the form (GET)."""
    if request.method == 'POST':
        latitude = request.form['latitude']
        longitude = request.form['longitude']
        distance = request.form['distance']
        #print("latitude %s " % (latitude))
        time_start = datetime.now()
        try:
            conn = mysql.connector.connect(**config)
            print("Connection established")
        except mysql.connector.Error as err:
            if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
                print("Something is wrong with the user name or password")
            elif err.errno == errorcode.ER_BAD_DB_ERROR:
                print("Database does not exist")
            else:
                print(err)
        else:
            cursor = conn.cursor()
            # Flat-earth distance approximation: 69.1 miles per degree of
            # latitude, longitude scaled by cos(latitude).  Parameters are
            # bound, so this query is injection-safe.
            cursor.execute("SELECT latitude, longitude, place, SQRT(POW(69.1 * (latitude - %s), 2) + POW(69.1 * (%s - longitude) * COS(latitude / 57.3), 2)) AS distance FROM earthquake_table HAVING distance < %s ORDER BY distance ;", (latitude, longitude, distance))
            result = cursor.fetchall()
            #print(result)
            cursor.close()
            conn.close()
            time_end = datetime.now()
            time_diff = time_end - time_start
            # NOTE(review): `result` is fetched but never passed to the template.
            return render_template('complete.html',time_diff=time_diff)
    return render_template('searchWithinDistance.html')
@app.route('/searchWithPlaceName', methods=['GET','POST'])
def searchWithPlaceName():
if request.method == 'POST':
state = request.form['state']
print("state %s " % (state))
time_start = datetime.now()
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
# "{0} LIKE '%{1}'".format(field, value_suffix)
query = "SELECT latitude, longitude, place FROM earthquake_table WHERE place LIKE '%{0}'".format(state)
cursor.execute(query)
results = cursor.fetchall()
#print(results[0])
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
print("time_diff %s" % (time_diff))
timediff = str(time_diff)
print("time_diff in string %s" % (timediff))
session['time_diff'] = timediff
return render_template('SearchWithPlaceNameResult.html', results=results)
return render_template('SearchWithPlaceName.html')
@app.route('/createDB')
def createDB():
fileread = open('all_month.csv','rt')
file_reader = csv.reader(fileread)
dataMonth = []
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
time_start = datetime.now()
cursor.execute("DROP TABLE IF EXISTS earthquake_table;")
cursor.execute("CREATE TABLE earthquake_table(time INT(11), latitude DECIMAL(10,8), longitude DECIMAL(11,8), depth DECIMAL(5,2), mag DECIMAL(5,2), magType VARCHAR(10), nst INT, gap DECIMAL(5,4), dmin DECIMAL(10,10), rms DECIMAL(7,7), net VARCHAR(10), id VARCHAR(25), updated INT(11), place VARCHAR(50), type VARCHAR(15), horizontalError DECIMAL(5,5), depthError DECIMAL(5,5), magError DECIMAL(5,5), magNst INT, status VARCHAR(15), locationSource VARCHAR(10), magSource VARCHAR(10));")
line = 0;
for attr in file_reader:
if line == 0:
line = 1
else:
s1 = str(attr[0])
s2 = str(attr[12])
dt_obj1 = datetime.strptime(s1, '%Y-%m-%dT%H:%M:%S.%fZ')
dt_obj2 = datetime.strptime(s2, '%Y-%m-%dT%H:%M:%S.%fZ')
millisec1 = dt_obj1.timestamp() * 1000
millisec2 = dt_obj2.timestamp() * 1000
data = []
data.extend((millisec1, attr[1], attr[2],attr[3], attr[4], attr[5], attr[6], attr[7], attr[8], attr[9], attr[10], attr[11], millisec2, attr[13], attr[14], attr[15], attr[16], attr[17], attr[18], attr[19], attr[20], attr[21]))
dataMonth.append(data)
#cursor.executemany("""INSERT INTO earthquake_table (time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated, place, type, horizontalError, depthError, magError, magNst, status, locationSource, magSource) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", [(millisec1, attr[1], attr[2],attr[3], attr[4], attr[5], attr[6], attr[7], attr[8], attr[9], attr[10], attr[11], millisec2, attr[13], attr[14], attr[15], attr[16], attr[17], attr[18], attr[19], attr[20], attr[21])])
stmt = """INSERT INTO earthquake_table (time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated, place, type, horizontalError, depthError, magError, magNst, status, locationSource, magSource) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
cursor.executemany(stmt, dataMonth)
conn.commit()
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
return render_template('complete.html', time_diff=time_diff)
if __name__ == '__main__':
app.secret_key = 'mysecretkey'
app.run(debug=True)
app.secret_key = 'secretkey' | main.py | from flask import Flask, render_template, request, redirect, url_for, session
from azure.storage.blob import BlockBlobService
from azure.storage.blob import PublicAccess
from azure.storage.blob import ContentSettings
import mysql.connector
from mysql.connector import errorcode
import os
import csv
import random
from datetime import datetime
app = Flask(__name__)
#clone successfull
config = {
'host':'myserver-mysql-ashu.mysql.database.azure.com',
'user':'root123@myserver-mysql-ashu',
'password':'<PASSWORD>',
'database':'mysqlashudb',
'ssl_ca':'BaltimoreCyberTrustRoot.crt.pem'
}
#block_blob_service = BlockBlobService(account_name='ashuazurestorage', account_key='<KEY>')
#block_blob_service.set_container_acl('ashu-blob-container', public_access=PublicAccess.Container)
@app.route('/')
def index():
return redirect(url_for('login'))
@app.route('/login', methods=['POST', 'GET'])
def login():
if request.method == 'POST':
username = request.form['username']
session['logged_in'] = True
session['username'] = username
time_start = datetime.now()
session['time'] = time_start
return redirect(url_for('dashboard'))
return render_template('login.html')
@app.route('/randomQueries')
def randomQueries():
return render_template('randomQueries.html')
@app.route('/dashboard')
def dashboard():
return render_template('dashboard.html')
@app.route('/randomqueriesupto1000', methods=['GET','POST'])
def randomQueriesUpto1000():
if request.method == 'POST':
number = request.form['ChooseNumber']
print("number choosen %s" %(number))
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
time_start = datetime.now()
num = 0.5
number = int(number)
for i in range(0,number):
randomnum = random.uniform(0,1)
#print("random number %s" %(randomnum))
#result = cursor.execute("SELECT mag FROM earthquake_table WHERE mag >= %s AND mag <= %s ;", (randomnum,randomnum+1.0))
result = cursor.execute("SELECT COUNT(mag) FROM earthquake_table WHERE mag = %s ;", [num])
articles = cursor.fetchone()
num = num + 0.01
num = round(num, 2)
print(num)
print(articles)
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
return render_template('complete.html', time_diff=time_diff)
return render_template('Randomqueriesupto1000.html')
@app.route('/restrictedQueries')
def restrictedQueries():
return render_template('restrictedQueries.html')
@app.route('/searchWithinDistance', methods=['GET','POST'])
def searchWithinDistance():
if request.method == 'POST':
latitude = request.form['latitude']
longitude = request.form['longitude']
distance = request.form['distance']
#print("latitude %s " % (latitude))
time_start = datetime.now()
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
cursor.execute("SELECT latitude, longitude, place, SQRT(POW(69.1 * (latitude - %s), 2) + POW(69.1 * (%s - longitude) * COS(latitude / 57.3), 2)) AS distance FROM earthquake_table HAVING distance < %s ORDER BY distance ;", (latitude, longitude, distance))
result = cursor.fetchall()
#print(result)
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
return render_template('complete.html',time_diff=time_diff)
return render_template('searchWithinDistance.html')
@app.route('/searchWithPlaceName', methods=['GET','POST'])
def searchWithPlaceName():
if request.method == 'POST':
state = request.form['state']
print("state %s " % (state))
time_start = datetime.now()
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
# "{0} LIKE '%{1}'".format(field, value_suffix)
query = "SELECT latitude, longitude, place FROM earthquake_table WHERE place LIKE '%{0}'".format(state)
cursor.execute(query)
results = cursor.fetchall()
#print(results[0])
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
print("time_diff %s" % (time_diff))
timediff = str(time_diff)
print("time_diff in string %s" % (timediff))
session['time_diff'] = timediff
return render_template('SearchWithPlaceNameResult.html', results=results)
return render_template('SearchWithPlaceName.html')
@app.route('/createDB')
def createDB():
fileread = open('all_month.csv','rt')
file_reader = csv.reader(fileread)
dataMonth = []
try:
conn = mysql.connector.connect(**config)
print("Connection established")
except mysql.connector.Error as err:
if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:
print("Something is wrong with the user name or password")
elif err.errno == errorcode.ER_BAD_DB_ERROR:
print("Database does not exist")
else:
print(err)
else:
cursor = conn.cursor()
time_start = datetime.now()
cursor.execute("DROP TABLE IF EXISTS earthquake_table;")
cursor.execute("CREATE TABLE earthquake_table(time INT(11), latitude DECIMAL(10,8), longitude DECIMAL(11,8), depth DECIMAL(5,2), mag DECIMAL(5,2), magType VARCHAR(10), nst INT, gap DECIMAL(5,4), dmin DECIMAL(10,10), rms DECIMAL(7,7), net VARCHAR(10), id VARCHAR(25), updated INT(11), place VARCHAR(50), type VARCHAR(15), horizontalError DECIMAL(5,5), depthError DECIMAL(5,5), magError DECIMAL(5,5), magNst INT, status VARCHAR(15), locationSource VARCHAR(10), magSource VARCHAR(10));")
line = 0;
for attr in file_reader:
if line == 0:
line = 1
else:
s1 = str(attr[0])
s2 = str(attr[12])
dt_obj1 = datetime.strptime(s1, '%Y-%m-%dT%H:%M:%S.%fZ')
dt_obj2 = datetime.strptime(s2, '%Y-%m-%dT%H:%M:%S.%fZ')
millisec1 = dt_obj1.timestamp() * 1000
millisec2 = dt_obj2.timestamp() * 1000
data = []
data.extend((millisec1, attr[1], attr[2],attr[3], attr[4], attr[5], attr[6], attr[7], attr[8], attr[9], attr[10], attr[11], millisec2, attr[13], attr[14], attr[15], attr[16], attr[17], attr[18], attr[19], attr[20], attr[21]))
dataMonth.append(data)
#cursor.executemany("""INSERT INTO earthquake_table (time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated, place, type, horizontalError, depthError, magError, magNst, status, locationSource, magSource) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);""", [(millisec1, attr[1], attr[2],attr[3], attr[4], attr[5], attr[6], attr[7], attr[8], attr[9], attr[10], attr[11], millisec2, attr[13], attr[14], attr[15], attr[16], attr[17], attr[18], attr[19], attr[20], attr[21])])
stmt = """INSERT INTO earthquake_table (time, latitude, longitude, depth, mag, magType, nst, gap, dmin, rms, net, id, updated, place, type, horizontalError, depthError, magError, magNst, status, locationSource, magSource) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s);"""
cursor.executemany(stmt, dataMonth)
conn.commit()
cursor.close()
conn.close()
time_end = datetime.now()
time_diff = time_end - time_start
return render_template('complete.html', time_diff=time_diff)
if __name__ == '__main__':
app.secret_key = 'mysecretkey'
app.run(debug=True)
app.secret_key = 'secretkey' | 0.269902 | 0.076822 |
from django.contrib.auth import get_user_model
from django.urls import reverse
from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from rest_framework.test import APITestCase
from menus_project import constants as c
from menus_project import factories as f
from . import serializers, views
from .permissions import HasRestaurantPermissionsOrReadOnly
from restaurants.models import Restaurant
from menus.models import Menu, MenuSection, MenuItem
class IsUsernameAvailableTest(APITestCase):
    """Tests for the `is_username_available` function-based view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.is_username_available
        # one known user, whose username is therefore taken
        cls.test_user = f.UserFactory()

    def test_available_username(self):
        """A username nobody has registered is reported as available."""
        available_username = 'available_username'
        self.current_test_url = reverse(
            'api:is_username_available',
            kwargs={'username': available_username})

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        self.assertJSONEqual(
            self.response.content.decode('utf-8'),
            {"isUsernameAvailable": True})

    def test_unavailable_username(self):
        """An existing user's username is reported as unavailable."""
        unavailable_username = self.test_user.username
        self.current_test_url = reverse(
            'api:is_username_available',
            kwargs={'username': unavailable_username})

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        self.assertJSONEqual(
            self.response.content.decode('utf-8'),
            {"isUsernameAvailable": False})
class IsEmailAvailableTest(APITestCase):
    """Tests for the `is_email_available` function-based view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.is_email_available

        # create model objects
        cls.test_user = f.UserFactory()

    # fix: both methods were misnamed `test_*_username` although they test
    # email availability; renamed so failures read correctly.
    def test_available_email(self):
        """An unregistered email address is reported as available."""
        # fix: the address was a sanitized '<EMAIL>' placeholder; use a
        # concrete address that no factory-created user owns.
        available_email = 'available_email@example.com'
        self.current_test_url = reverse('api:is_email_available', kwargs={
            'email': available_email})

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isEmailAvailable": True})

    def test_unavailable_email(self):
        """An email already attached to a user is reported as unavailable."""
        unavailable_email = self.test_user.email
        self.current_test_url = reverse('api:is_email_available', kwargs={
            'email': unavailable_email})

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isEmailAvailable": False})
class RestaurantListTest(APITestCase):
    """Tests for the RestaurantList list/create API view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.RestaurantList

        # create model objects
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_restaurants = f.RestaurantFactory.create_batch(3)

        # generate test url
        cls.current_test_url = reverse('api:restaurant_list')

    def setUp(self):
        # fix: the password was a sanitized '<PASSWORD>' placeholder, which
        # is not valid Python. NOTE(review): constant name inferred from the
        # `constants as c` import and a '...USER_PASSWORD' residue elsewhere
        # in this file — confirm against menus_project.constants.
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'RestaurantList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(self.view.permission_classes, [IsAuthenticated])

    def test_queryset(self):
        self.assertEqual(
            repr(self.view.queryset), repr(Restaurant.objects.all()))

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.RestaurantSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        # get expected objects from serializer
        restaurants = Restaurant.objects.all()
        serializer = serializers.RestaurantSerializer(restaurants, many=True)

        # get actual objects from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        # compare the expected result with the actual result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Restaurant'}

        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Restaurant'}

        # create a new object, with a before-and-after count
        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_restaurant_count = Restaurant.objects.count()

        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_restaurant_count + 1, new_restaurant_count)

        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])
        self.assertEqual(
            self.response.data['admin_users'], [self.restaurant_admin_user.pk])
class RestaurantDetailTest(APITestCase):
    """Tests for the RestaurantDetail retrieve/update/destroy API view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.RestaurantDetail

        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_restaurant = \
            f.RestaurantFactory(admin_users=[self.restaurant_admin_user])

        # generate test url
        self.kwargs = {'restaurant_pk': self.test_restaurant.pk}
        self.current_test_url = \
            reverse('api:restaurant_detail', kwargs=self.kwargs)

        # fix: passwords throughout this class were sanitized '<PASSWORD>'
        # placeholders (invalid Python); the residue '<PASSWORD>USER_PASSWORD'
        # in the original indicates the shared constant c.TEST_USER_PASSWORD.
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'RestaurantDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'restaurant_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.RestaurantSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        restaurant = Restaurant.objects.filter(pk=self.test_restaurant.pk)
        serializer = serializers.RestaurantSerializer(restaurant, many=True)

        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Restaurant Name'}

        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Restaurant Name'}

        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Restaurant Name'}

        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_restaurant_count = Restaurant.objects.count()

        # object updated successfully
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_restaurant_count, new_restaurant_count)

        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_restaurant_count = Restaurant.objects.count()

        # object deleted successfully, and object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_restaurant_count - 1, new_restaurant_count)
class MenuListTest(APITestCase):
    """Tests for the MenuList list/create API view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuList

        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_restaurant = f.RestaurantFactory()
        cls.test_menus = f.MenuFactory.create_batch(
            size=3,
            restaurant=cls.test_restaurant,
            admin_users=[cls.restaurant_admin_user])

        # generate test url
        # fix: the 'restaurant_pk' kwarg was built from a menu's pk
        # (cls.test_menus[0].pk); it must be the restaurant's pk. The two
        # only matched by coincidence of auto-increment ids.
        cls.kwargs = {'restaurant_pk': cls.test_restaurant.pk}
        cls.current_test_url = reverse('api:menu_list', kwargs=cls.kwargs)

    def setUp(self):
        # fix: password was a sanitized '<PASSWORD>' placeholder (invalid
        # Python); use the shared test-user password constant.
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'restaurant_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_list_objects_authorized_user(self):
        # get expected result from serializer
        menus = Menu.objects.filter(
            restaurant__pk=self.test_menus[0].restaurant.pk)
        serializer = serializers.MenuSerializer(menus, many=True)

        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        # compare the expected result with the result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Menu'}

        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Menu'}

        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authorized_user(self):
        post_data = {'name': 'Created Menu'}

        # create a new object, with a before-and-after count
        old_menu_count = Menu.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_menu_count = Menu.objects.count()

        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_menu_count + 1, new_menu_count)

        # new object has correct parameters
        self.assertEqual(
            self.response.data['restaurant_name'], self.test_restaurant.name)
        self.assertEqual(self.response.data['name'], post_data['name'])
class MenuDetailTest(APITestCase):
    """Tests for the MenuDetail retrieve/update/destroy API view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuDetail

        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_menu = \
            f.MenuFactory(admin_users=[self.restaurant_admin_user])

        # generate test url
        self.kwargs = {'restaurant_pk': self.test_menu.restaurant.pk,
                       'menu_pk': self.test_menu.pk}
        self.current_test_url = \
            reverse('api:menu_detail', kwargs=self.kwargs)

        # fix: passwords throughout this class were sanitized '<PASSWORD>'
        # placeholders (invalid Python); use the shared test-user password.
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)
        self.response = self.client.get(self.current_test_url)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        menu = Menu.objects.filter(pk=self.test_menu.pk)
        serializer = serializers.MenuSerializer(menu, many=True)

        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Menu Name'}

        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Menu Name'}

        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Menu Name'}

        old_menu_count = Menu.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_menu_count = Menu.objects.count()

        # object updated successfully
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_menu_count, new_menu_count)

        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_menu_count = Menu.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_menu_count = Menu.objects.count()

        # object deleted successfully, and object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_menu_count - 1, new_menu_count)
class MenuSectionListTest(APITestCase):
    """Tests for the MenuSectionList list/create API view."""

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuSectionList

        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_menu = f.MenuFactory()
        cls.test_menusections = f.MenuSectionFactory.create_batch(
            size=3,
            menu=cls.test_menu,
            admin_users=[cls.restaurant_admin_user])

        # generate test url
        cls.kwargs = {'restaurant_pk': cls.test_menu.restaurant.pk,
                      'menu_pk': cls.test_menu.pk}
        cls.current_test_url = \
            reverse('api:menusection_list', kwargs=cls.kwargs)

    def setUp(self):
        # fix: passwords throughout this class were sanitized '<PASSWORD>'
        # placeholders (invalid Python); use the shared test-user password.
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuSectionList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSectionSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_list_objects_authorized_user(self):
        # get expected objects from serializer
        menusections = \
            MenuSection.objects.filter(menu__pk=self.test_menu.pk)
        serializer = serializers.MenuSectionSerializer(menusections, many=True)

        # get actual objects from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

        # compare the expected result with the actual result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Menu Section'}

        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Menu Section'}

        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)

        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authorized_user(self):
        post_data = {'name': 'Created Menu Section'}

        # create a new object, with a before-and-after count
        old_menusection_count = MenuSection.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_menusection_count = MenuSection.objects.count()

        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_menusection_count + 1, new_menusection_count)

        # new object has correct parameters
        self.assertEqual(
            self.response.data['menu_name'], self.test_menu.name)
        self.assertEqual(self.response.data['name'], post_data['name'])
class MenuSectionDetailTest(APITestCase):
    """Tests for views.MenuSectionDetail (retrieve/update/destroy).

    Fix: restores the redacted login password constant
    (c.TEST_USER_PASSWORD, the project-wide factory password).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuSectionDetail
        # create model objects: test_user has no admin rights;
        # restaurant_admin_user administers the section made in setUp()
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_menusection = \
            f.MenuSectionFactory(admin_users=[self.restaurant_admin_user])
        # generate test url
        self.kwargs = {
            'restaurant_pk': self.test_menusection.menu.restaurant.pk,
            'menu_pk': self.test_menusection.menu.pk,
            'menusection_pk': self.test_menusection.pk}
        self.current_test_url = \
            reverse('api:menusection_detail', kwargs=self.kwargs)
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)
        self.response = self.client.get(self.current_test_url)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuSectionDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menusection_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSectionSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        menusection = MenuSection.objects.filter(pk=self.test_menusection.pk)
        serializer = serializers.MenuSectionSerializer(menusection, many=True)
        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Menu Section Name'}
        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Menu Section Name'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Menu Section Name'}
        # update the object
        old_menusection_count = MenuSection.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_menusection_count = MenuSection.objects.count()
        # object updated successfully, object count is unchanged
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_menusection_count, new_menusection_count)
        # updated object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_menusection_count = MenuSection.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_menusection_count = MenuSection.objects.count()
        # object deleted successfully, and object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_menusection_count - 1, new_menusection_count)
class MenuItemListTest(APITestCase):
    """Tests for views.MenuItemList (list/create menu items).

    Fix: restores the redacted login password constant
    (c.TEST_USER_PASSWORD).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuItemList
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_menusection = f.MenuSectionFactory()
        cls.test_menuitems = f.MenuItemFactory.create_batch(
            size=3,
            menusection=cls.test_menusection,
            admin_users=[cls.restaurant_admin_user])
        # generate test url
        cls.kwargs = {
            'restaurant_pk':
                cls.test_menuitems[0].menusection.menu.restaurant.pk,
            'menu_pk': cls.test_menuitems[0].menusection.menu.pk,
            'menusection_pk': cls.test_menuitems[0].menusection.pk}
        cls.current_test_url = \
            reverse('api:menuitem_list', kwargs=cls.kwargs)

    def setUp(self):
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuItemList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuItemSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_list_objects_authorized_user(self):
        # get expected objects from serializer
        menuitems = MenuItem.objects.filter(
            menusection__pk=self.test_menusection.pk)
        serializer = serializers.MenuItemSerializer(menuitems, many=True)
        # get actual objects from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the actual result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Menu Item'}
        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Menu Item'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authorized_user(self):
        post_data = {'name': 'Created Menu Item'}
        # create a new object, with a before-and-after count
        old_menuitem_count = MenuItem.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_menuitem_count = MenuItem.objects.count()
        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_menuitem_count + 1, new_menuitem_count)
        # new object has correct parameters
        self.assertEqual(
            self.response.data['menusection_name'], self.test_menusection.name)
        self.assertEqual(self.response.data['name'], post_data['name'])
class MenuItemDetailTest(APITestCase):
    """Tests for views.MenuItemDetail (retrieve/update/destroy).

    Fixes: restores the redacted login password constant
    (c.TEST_USER_PASSWORD); renames the final DELETE test to
    ..._authorized_user for consistency with the sibling test classes;
    drops dataset-extraction junk fused onto the last line.
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuItemDetail
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_menuitem = \
            f.MenuItemFactory(admin_users=[self.restaurant_admin_user])
        # generate test url
        self.kwargs = {
            'restaurant_pk': self.test_menuitem.menusection.menu.restaurant.pk,
            'menu_pk': self.test_menuitem.menusection.menu.pk,
            'menusection_pk': self.test_menuitem.menusection.pk,
            'menuitem_pk': self.test_menuitem.pk}
        self.current_test_url = \
            reverse('api:menuitem_detail', kwargs=self.kwargs)
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuItemDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menuitem_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuItemSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        menuitem = MenuItem.objects.filter(pk=self.test_menuitem.pk)
        serializer = serializers.MenuItemSerializer(menuitem, many=True)
        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Menu Item Name',
                     'description': 'Updated Menu Item Description'}
        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Menu Item Name',
                     'description': 'Updated Menu Item Description'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Menu Item Name',
                     'description': 'Updated Menu Item Description'}
        old_menuitem_count = MenuItem.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_menuitem_count = MenuItem.objects.count()
        # object updated successfully
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_menuitem_count, new_menuitem_count)
        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_menuitem_count = MenuItem.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_menuitem_count = MenuItem.objects.count()
        # object deleted successfully, object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_menuitem_count - 1, new_menuitem_count)
from django.contrib.auth import get_user_model
from django.urls import reverse

from rest_framework import generics
from rest_framework.permissions import IsAuthenticated
from rest_framework.test import APITestCase

from menus_project import constants as c
from menus_project import factories as f
from menus.models import Menu, MenuSection, MenuItem
from restaurants.models import Restaurant

from . import serializers, views
from .permissions import HasRestaurantPermissionsOrReadOnly
class IsUsernameAvailableTest(APITestCase):
    """Tests for views.is_username_available.

    Fix: PEP 8 keyword-argument spacing (`kwargs={...}`, not
    `kwargs = {...}`).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.is_username_available
        # create model objects: this user's name is taken by definition
        cls.test_user = f.UserFactory()

    def test_available_username(self):
        available_username = 'available_username'
        self.current_test_url = reverse(
            'api:is_username_available',
            kwargs={'username': available_username})
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isUsernameAvailable": True})

    def test_unavailable_username(self):
        unavailable_username = self.test_user.username
        self.current_test_url = reverse(
            'api:is_username_available',
            kwargs={'username': unavailable_username})
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isUsernameAvailable": False})
class IsEmailAvailableTest(APITestCase):
    """Tests for views.is_email_available.

    Fixes: test methods were copy-pasted from the username tests and
    still said `..._username`; renamed to `..._email`. Replaces the
    redacted '<EMAIL>' placeholder with a concrete unused address and
    fixes PEP 8 keyword-argument spacing.
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.is_email_available
        # create model objects: this user's email is taken by definition
        cls.test_user = f.UserFactory()

    def test_available_email(self):
        # an address no factory user holds, so it must be reported available
        available_email = '[email protected]'
        self.current_test_url = reverse(
            'api:is_email_available', kwargs={'email': available_email})
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isEmailAvailable": True})

    def test_unavailable_email(self):
        unavailable_email = self.test_user.email
        self.current_test_url = reverse(
            'api:is_email_available', kwargs={'email': unavailable_email})
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        response_json = self.response.content.decode('utf-8')
        self.assertJSONEqual(response_json, {"isEmailAvailable": False})
class RestaurantListTest(APITestCase):
    """Tests for views.RestaurantList (list/create restaurants).

    Fix: restores the redacted login password constant
    (c.TEST_USER_PASSWORD).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.RestaurantList
        # create model objects
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_restaurants = f.RestaurantFactory.create_batch(3)
        # generate test url
        cls.current_test_url = reverse('api:restaurant_list')

    def setUp(self):
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'RestaurantList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(self.view.permission_classes, [IsAuthenticated])

    def test_queryset(self):
        self.assertEqual(
            repr(self.view.queryset), repr(Restaurant.objects.all()))

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.RestaurantSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        # get expected objects from serializer
        restaurants = Restaurant.objects.all()
        serializer = serializers.RestaurantSerializer(restaurants, many=True)
        # get actual objects from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the actual result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Restaurant'}
        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Restaurant'}
        # create a new object, with a before-and-after count
        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_restaurant_count = Restaurant.objects.count()
        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_restaurant_count + 1, new_restaurant_count)
        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])
        self.assertEqual(
            self.response.data['admin_users'], [self.restaurant_admin_user.pk])
class RestaurantDetailTest(APITestCase):
    """Tests for views.RestaurantDetail (retrieve/update/destroy).

    Fix: restores the redacted login password constant — the partially
    redacted original (`<PASSWORD>USER_PASSWORD`) shows it was
    c.TEST_USER_PASSWORD.
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.RestaurantDetail
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_restaurant = \
            f.RestaurantFactory(admin_users=[self.restaurant_admin_user])
        # generate test url
        self.kwargs = {'restaurant_pk': self.test_restaurant.pk}
        self.current_test_url = \
            reverse('api:restaurant_detail', kwargs=self.kwargs)
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'RestaurantDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'restaurant_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.RestaurantSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        restaurant = Restaurant.objects.filter(pk=self.test_restaurant.pk)
        serializer = serializers.RestaurantSerializer(restaurant, many=True)
        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Restaurant Name'}
        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Restaurant Name'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Restaurant Name'}
        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_restaurant_count = Restaurant.objects.count()
        # object updated successfully
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_restaurant_count, new_restaurant_count)
        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_restaurant_count = Restaurant.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_restaurant_count = Restaurant.objects.count()
        # object deleted successfully, and object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_restaurant_count - 1, new_restaurant_count)
class MenuListTest(APITestCase):
    """Tests for views.MenuList (list/create menus).

    Fixes: the test URL was built with `cls.test_menus[0].pk` (a *menu*
    pk) as `restaurant_pk`; it only passed when the two pks happened to
    coincide. Uses the restaurant's own pk instead. Also restores the
    redacted login password constant (c.TEST_USER_PASSWORD).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuList
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_restaurant = f.RestaurantFactory()
        cls.test_menus = f.MenuFactory.create_batch(
            size=3,
            restaurant=cls.test_restaurant,
            admin_users=[cls.restaurant_admin_user])
        # generate test url: the kwarg must be the restaurant's pk
        cls.kwargs = {'restaurant_pk': cls.test_restaurant.pk}
        cls.current_test_url = reverse('api:menu_list', kwargs=cls.kwargs)

    def setUp(self):
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'restaurant_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_list_objects_authorized_user(self):
        # get expected result from serializer
        menus = Menu.objects.filter(
            restaurant__pk=self.test_menus[0].restaurant.pk)
        serializer = serializers.MenuSerializer(menus, many=True)
        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Menu'}
        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Menu'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authorized_user(self):
        post_data = {'name': 'Created Menu'}
        # create a new object, with a before-and-after count
        old_menu_count = Menu.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_menu_count = Menu.objects.count()
        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_menu_count + 1, new_menu_count)
        # new object has correct parameters
        self.assertEqual(
            self.response.data['restaurant_name'], self.test_restaurant.name)
        self.assertEqual(self.response.data['name'], post_data['name'])
class MenuDetailTest(APITestCase):
    """Tests for views.MenuDetail (retrieve/update/destroy).

    Fix: restores the redacted login password constant
    (c.TEST_USER_PASSWORD).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuDetail
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()

    def setUp(self):
        self.test_menu = \
            f.MenuFactory(admin_users=[self.restaurant_admin_user])
        # generate test url
        self.kwargs = {'restaurant_pk': self.test_menu.restaurant.pk,
                       'menu_pk': self.test_menu.pk}
        self.current_test_url = \
            reverse('api:menu_detail', kwargs=self.kwargs)
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)
        self.response = self.client.get(self.current_test_url)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuDetail')

    def test_view_parent_class(self):
        self.assertEqual(
            self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSerializer)

    # request.GET
    def test_request_get_method_retrieve_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_retrieve_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_retrieve_object_authorized_user(self):
        # get expected result from serializer
        menu = Menu.objects.filter(pk=self.test_menu.pk)
        serializer = serializers.MenuSerializer(menu, many=True)
        # get result from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the result
        self.assertEqual(self.response.data, dict(serializer.data[0]))

    # request.PUT
    def test_request_put_method_update_object_unauthenticated_user(self):
        post_data = {'name': 'Updated Menu Name'}
        self.client.logout()
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authenticated_user(self):
        post_data = {'name': 'Updated Menu Name'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.put(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_put_method_update_object_authorized_user(self):
        post_data = {'name': 'Updated Menu Name'}
        old_menu_count = Menu.objects.count()
        self.response = self.client.put(self.current_test_url, post_data)
        new_menu_count = Menu.objects.count()
        # object updated successfully
        self.assertEqual(self.response.status_code, 200)
        self.assertEqual(old_menu_count, new_menu_count)
        # new object has correct parameters
        self.assertEqual(self.response.data['name'], post_data['name'])

    # request.DELETE
    def test_request_delete_method_destroy_object_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.delete(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_delete_method_destroy_object_authorized_user(self):
        # delete the object
        old_menu_count = Menu.objects.count()
        self.response = self.client.delete(self.current_test_url)
        new_menu_count = Menu.objects.count()
        # object deleted successfully, and object count decreased by one
        self.assertEqual(self.response.status_code, 204)
        self.assertEqual(old_menu_count - 1, new_menu_count)
class MenuSectionListTest(APITestCase):
    """Tests for views.MenuSectionList (list/create menu sections).

    Fix: restores the redacted login password constant
    (c.TEST_USER_PASSWORD).
    """

    @classmethod
    def setUpTestData(cls):
        cls.view = views.MenuSectionList
        # create model objects
        cls.test_user = f.UserFactory()
        cls.restaurant_admin_user = f.UserFactory()
        cls.test_menu = f.MenuFactory()
        cls.test_menusections = f.MenuSectionFactory.create_batch(
            size=3,
            menu=cls.test_menu,
            admin_users=[cls.restaurant_admin_user])
        # generate test url
        cls.kwargs = {'restaurant_pk': cls.test_menu.restaurant.pk,
                      'menu_pk': cls.test_menu.pk}
        cls.current_test_url = \
            reverse('api:menusection_list', kwargs=cls.kwargs)

    def setUp(self):
        self.client.login(username=self.restaurant_admin_user.username,
                          password=c.TEST_USER_PASSWORD)

    # view attributes
    def test_view_name(self):
        self.assertEqual(self.view.__name__, 'MenuSectionList')

    def test_view_parent_class(self):
        self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)

    def test_permission_classes(self):
        self.assertEqual(
            self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])

    def test_lookup_url_kwarg(self):
        self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')

    def test_serializer_class(self):
        self.assertEqual(
            self.view.serializer_class, serializers.MenuSectionSerializer)

    # request.GET
    def test_request_get_method_list_objects_unauthenticated_user(self):
        self.client.logout()
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 403)

    def test_request_get_method_list_objects_authenticated_user(self):
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)

    def test_request_get_method_list_objects_authorized_user(self):
        # get expected objects from serializer
        menusections = \
            MenuSection.objects.filter(menu__pk=self.test_menu.pk)
        serializer = serializers.MenuSectionSerializer(menusections, many=True)
        # get actual objects from view
        self.response = self.client.get(self.current_test_url)
        self.assertEqual(self.response.status_code, 200)
        # compare the expected result with the actual result
        self.assertEqual(self.response.data, serializer.data)

    # request.POST
    def test_request_post_method_create_object_unauthenticated_user(self):
        post_data = {'name': 'Created Menu Section'}
        self.client.logout()
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authenticated_user(self):
        post_data = {'name': 'Created Menu Section'}
        login_successful = self.client.login(
            username=self.test_user.username, password=c.TEST_USER_PASSWORD)
        self.assertTrue(login_successful)
        self.response = self.client.post(self.current_test_url, post_data)
        self.assertEqual(self.response.status_code, 403)

    def test_request_post_method_create_object_authorized_user(self):
        post_data = {'name': 'Created Menu Section'}
        # create a new object, with a before-and-after count
        old_menusection_count = MenuSection.objects.count()
        self.response = self.client.post(self.current_test_url, post_data)
        new_menusection_count = MenuSection.objects.count()
        # new object created successfully
        self.assertEqual(self.response.status_code, 201)
        self.assertEqual(old_menusection_count + 1, new_menusection_count)
        # new object has correct parameters
        self.assertEqual(
            self.response.data['menu_name'], self.test_menu.name)
        self.assertEqual(self.response.data['name'], post_data['name'])
class MenuSectionDetailTest(APITestCase):
@classmethod
def setUpTestData(cls):
cls.view = views.MenuSectionDetail
# create model objects
cls.test_user = f.UserFactory()
cls.restaurant_admin_user = f.UserFactory()
def setUp(self):
self.test_menusection = \
f.MenuSectionFactory(admin_users=[self.restaurant_admin_user])
# generate test url
self.kwargs = {
'restaurant_pk': self.test_menusection.menu.restaurant.pk,
'menu_pk': self.test_menusection.menu.pk,
'menusection_pk': self.test_menusection.pk}
self.current_test_url = \
reverse('api:menusection_detail', kwargs=self.kwargs)
self.client.login(username=self.restaurant_admin_user.username,
password=<PASSWORD>)
self.response = self.client.get(self.current_test_url)
# view attributes
def test_view_name(self):
self.assertEqual(self.view.__name__, 'MenuSectionDetail')
def test_view_parent_class(self):
self.assertEqual(
self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)
def test_permission_classes(self):
self.assertEqual(
self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])
def test_lookup_url_kwarg(self):
self.assertEqual(self.view.lookup_url_kwarg, 'menusection_pk')
def test_serializer_class(self):
self.assertEqual(
self.view.serializer_class, serializers.MenuSectionSerializer)
# request.GET
def test_request_get_method_retrieve_object_unauthenticated_user(self):
self.client.logout()
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_get_method_retrieve_object_authenticated_user(self):
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
def test_request_get_method_retrieve_object_authorized_user(self):
# get expected result from serializer
menusection = MenuSection.objects.filter(pk=self.test_menusection.pk)
serializer = serializers.MenuSectionSerializer(menusection, many=True)
# get result from view
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
# compare the expected result with the result
self.assertEqual(self.response.data, dict(serializer.data[0]))
# request.PUT
def test_request_put_method_update_object_unauthenticated_user(self):
post_data = {'name': 'Updated Menu Section Name'}
self.client.logout()
self.response = self.client.put(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_put_method_update_object_authenticated_user(self):
post_data = {'name': 'Updated Menu Section Name'}
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.put(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_put_method_update_object_authorized_user(self):
post_data = {'name': 'Updated Menu Section Name'}
# update the object
old_menusection_count = MenuSection.objects.count()
self.response = self.client.put(self.current_test_url, post_data)
new_menusection_count = MenuSection.objects.count()
# object updated successfully, object count is unchanged
self.assertEqual(self.response.status_code, 200)
self.assertEqual(old_menusection_count, new_menusection_count)
# updated object has correct parameters
self.assertEqual(self.response.data['name'], post_data['name'])
# request.DELETE
def test_request_delete_method_destroy_object_unauthenticated_user(self):
self.client.logout()
self.response = self.client.delete(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_delete_method_destroy_object_authenticated_user(self):
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.delete(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_delete_method_destroy_object_authorized_user(self):
# delete the object
old_menusection_count = MenuSection.objects.count()
self.response = self.client.delete(self.current_test_url)
new_menusection_count = MenuSection.objects.count()
# object deleted successfully, and object count decreased by one
self.assertEqual(self.response.status_code, 204)
self.assertEqual(old_menusection_count - 1, new_menusection_count)
class MenuItemListTest(APITestCase):
@classmethod
def setUpTestData(cls):
cls.view = views.MenuItemList
# create model objects
cls.test_user = f.UserFactory()
cls.restaurant_admin_user = f.UserFactory()
cls.test_menusection = f.MenuSectionFactory()
cls.test_menuitems = f.MenuItemFactory.create_batch(
size=3,
menusection=cls.test_menusection,
admin_users=[cls.restaurant_admin_user])
# generate test url
cls.kwargs = {
'restaurant_pk':
cls.test_menuitems[0].menusection.menu.restaurant.pk,
'menu_pk': cls.test_menuitems[0].menusection.menu.pk,
'menusection_pk': cls.test_menuitems[0].menusection.pk}
cls.current_test_url = \
reverse('api:menuitem_list', kwargs=cls.kwargs)
def setUp(self):
self.client.login(username=self.restaurant_admin_user.username,
password=<PASSWORD>)
# view attributes
def test_view_name(self):
self.assertEqual(self.view.__name__, 'MenuItemList')
def test_view_parent_class(self):
self.assertEqual(self.view.__bases__[-1], generics.ListCreateAPIView)
def test_permission_classes(self):
self.assertEqual(
self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])
def test_lookup_url_kwarg(self):
self.assertEqual(self.view.lookup_url_kwarg, 'menu_pk')
def test_serializer_class(self):
self.assertEqual(
self.view.serializer_class, serializers.MenuItemSerializer)
# request.GET
def test_request_get_method_list_objects_unauthenticated_user(self):
self.client.logout()
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_get_method_list_objects_authenticated_user(self):
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
def test_request_get_method_list_objects_authorized_user(self):
# get expected objects from serializer
menuitems = MenuItem.objects.filter(
menusection__pk=self.test_menusection.pk)
serializer = serializers.MenuItemSerializer(menuitems, many=True)
# get actual objects from view
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
# compare the expected result with the actual result
self.assertEqual(self.response.data, serializer.data)
# request.POST
def test_request_post_method_create_object_unauthenticated_user(self):
post_data = {'name': 'Created Menu Item'}
self.client.logout()
self.response = self.client.post(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_post_method_create_object_authenticated_user(self):
post_data = {'name': 'Created Menu Item'}
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.post(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_post_method_create_object_authorized_user(self):
post_data = {'name': 'Created Menu Item'}
# create a new object, with a before-and-after count
old_menuitem_count = MenuItem.objects.count()
self.response = self.client.post(self.current_test_url, post_data)
new_menuitem_count = MenuItem.objects.count()
# new object created successfully
self.assertEqual(self.response.status_code, 201)
self.assertEqual(old_menuitem_count + 1, new_menuitem_count)
# new object has correct parameters
self.assertEqual(
self.response.data['menusection_name'], self.test_menusection.name)
self.assertEqual(self.response.data['name'], post_data['name'])
class MenuItemDetailTest(APITestCase):
@classmethod
def setUpTestData(cls):
cls.view = views.MenuItemDetail
# create model objects
cls.test_user = f.UserFactory()
cls.restaurant_admin_user = f.UserFactory()
def setUp(self):
self.test_menuitem = \
f.MenuItemFactory(admin_users=[self.restaurant_admin_user])
# generate test url
self.kwargs = {
'restaurant_pk': self.test_menuitem.menusection.menu.restaurant.pk,
'menu_pk': self.test_menuitem.menusection.menu.pk,
'menusection_pk': self.test_menuitem.menusection.pk,
'menuitem_pk': self.test_menuitem.pk}
self.current_test_url = \
reverse('api:menuitem_detail', kwargs=self.kwargs)
self.client.login(username=self.restaurant_admin_user.username,
password=<PASSWORD>)
# view attributes
def test_view_name(self):
self.assertEqual(self.view.__name__, 'MenuItemDetail')
def test_view_parent_class(self):
self.assertEqual(
self.view.__bases__[-1], generics.RetrieveUpdateDestroyAPIView)
def test_permission_classes(self):
self.assertEqual(
self.view.permission_classes, [HasRestaurantPermissionsOrReadOnly])
def test_lookup_url_kwarg(self):
self.assertEqual(self.view.lookup_url_kwarg, 'menuitem_pk')
def test_serializer_class(self):
self.assertEqual(
self.view.serializer_class, serializers.MenuItemSerializer)
# request.GET
def test_request_get_method_retrieve_object_unauthenticated_user(self):
self.client.logout()
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_get_method_retrieve_object_authenticated_user(self):
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
def test_request_get_method_retrieve_object_authorized_user(self):
# get expected result from serializer
menuitem = MenuItem.objects.filter(pk=self.test_menuitem.pk)
serializer = serializers.MenuItemSerializer(menuitem, many=True)
# get result from view
self.response = self.client.get(self.current_test_url)
self.assertEqual(self.response.status_code, 200)
# compare the expected result with the result
self.assertEqual(self.response.data, dict(serializer.data[0]))
# request.PUT
def test_request_put_method_update_object_unauthenticated_user(self):
post_data = {'name': 'Updated Menu Item Name',
'description': 'Updated Menu Item Description'}
self.client.logout()
self.response = self.client.put(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_put_method_update_object_authenticated_user(self):
post_data = {'name': 'Updated Menu Item Name',
'description': 'Updated Menu Item Description'}
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.put(self.current_test_url, post_data)
self.assertEqual(self.response.status_code, 403)
def test_request_put_method_update_object_authorized_user(self):
post_data = {'name': 'Updated Menu Item Name',
'description': 'Updated Menu Item Description'}
old_menuitem_count = MenuItem.objects.count()
self.response = self.client.put(self.current_test_url, post_data)
new_menuitem_count = MenuItem.objects.count()
# object updated successfully
self.assertEqual(self.response.status_code, 200)
self.assertEqual(old_menuitem_count, new_menuitem_count)
# new object has correct parameters
self.assertEqual(self.response.data['name'], post_data['name'])
# request.DELETE
def test_request_delete_method_destroy_object_unauthenticated_user(self):
self.client.logout()
self.response = self.client.delete(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_delete_method_destroy_object_authenticated_user(self):
login_successful = self.client.login(
username=self.test_user.username, password=<PASSWORD>)
self.assertTrue(login_successful)
self.response = self.client.delete(self.current_test_url)
self.assertEqual(self.response.status_code, 403)
def test_request_delete_method_destroy_object(self):
# delete the object
old_menuitem_count = MenuItem.objects.count()
self.response = self.client.delete(self.current_test_url)
new_menuitem_count = MenuItem.objects.count()
# object deleted successfully, object count decreased by one
self.assertEqual(self.response.status_code, 204)
self.assertEqual(old_menuitem_count - 1, new_menuitem_count) | 0.622 | 0.317109 |
import argparse
import pytorch_lightning as pl
from wheat.config import load_config
from wheat.data_module import WheatDataModule
from wheat.dataset import WheatDataset
from wheat.model import WheatModel
def predict(config, args_dict: dict, ckpt_path: str):
"""Run inference on the test set and save predictions.
:param config: configobj mapping of config paramters to values
:param args_dict: dict of options for initializing PyTorch Lightning Trainer
:param ckpt_path: path to checkpoint to load for inference
"""
wheat_data_module = WheatDataModule(config)
model = WheatModel(config)
trainer = pl.Trainer(**args_dict)
results = trainer.predict(model, wheat_data_module, ckpt_path=ckpt_path)
score_threshold = config['predict']['score_threshold']
save_submission(results, wheat_data_module.test_dataset, score_threshold)
def save_submission(results, test_dataset: WheatDataset, score_threshold: float,
save_path='submission.csv'):
"""Save inference outputs in format specified by Kaggle competition.
:param results: outputs from Trainer.predict; list of list of dicts with keys
'scores', 'boxes', and 'labels'
:param score_threshold: predictions with scores below this value will be discarded
"""
image_ids = test_dataset.image_ids
header = 'image_id,PredictionString\n'
with open(save_path, 'w', encoding='utf-8') as file_obj:
file_obj.write(header)
for image_id, predictions in zip(image_ids, results):
keep_inds = predictions[0]['scores'] > score_threshold
keep_scores = (predictions[0]['scores'][keep_inds]).cpu().numpy()
keep_boxes = predictions[0]['boxes'][keep_inds, :].cpu().numpy()
# convert last two columns to width and height instead of xmax, ymax
keep_boxes[:, 2:] -= keep_boxes[:, :2]
line = f'{image_id},'
for ind, score in enumerate(keep_scores):
line += f' {score} '
line += ' '.join([str(round(val)) for val in keep_boxes[ind]])
line += '\n'
file_obj.write(line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_path', type=str,
help='Path to model checkpoint file')
parser.add_argument('--config-path', type=str, default=None,
help='Path to config file')
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
args_dict = vars(args)
config = load_config(args_dict.pop('config_path'))
ckpt_path = args_dict.pop('ckpt_path')
predict(config, args_dict, ckpt_path)
if __name__ == '__main__':
main() | wheat/scripts/predict.py | import argparse
import pytorch_lightning as pl
from wheat.config import load_config
from wheat.data_module import WheatDataModule
from wheat.dataset import WheatDataset
from wheat.model import WheatModel
def predict(config, args_dict: dict, ckpt_path: str):
"""Run inference on the test set and save predictions.
:param config: configobj mapping of config paramters to values
:param args_dict: dict of options for initializing PyTorch Lightning Trainer
:param ckpt_path: path to checkpoint to load for inference
"""
wheat_data_module = WheatDataModule(config)
model = WheatModel(config)
trainer = pl.Trainer(**args_dict)
results = trainer.predict(model, wheat_data_module, ckpt_path=ckpt_path)
score_threshold = config['predict']['score_threshold']
save_submission(results, wheat_data_module.test_dataset, score_threshold)
def save_submission(results, test_dataset: WheatDataset, score_threshold: float,
save_path='submission.csv'):
"""Save inference outputs in format specified by Kaggle competition.
:param results: outputs from Trainer.predict; list of list of dicts with keys
'scores', 'boxes', and 'labels'
:param score_threshold: predictions with scores below this value will be discarded
"""
image_ids = test_dataset.image_ids
header = 'image_id,PredictionString\n'
with open(save_path, 'w', encoding='utf-8') as file_obj:
file_obj.write(header)
for image_id, predictions in zip(image_ids, results):
keep_inds = predictions[0]['scores'] > score_threshold
keep_scores = (predictions[0]['scores'][keep_inds]).cpu().numpy()
keep_boxes = predictions[0]['boxes'][keep_inds, :].cpu().numpy()
# convert last two columns to width and height instead of xmax, ymax
keep_boxes[:, 2:] -= keep_boxes[:, :2]
line = f'{image_id},'
for ind, score in enumerate(keep_scores):
line += f' {score} '
line += ' '.join([str(round(val)) for val in keep_boxes[ind]])
line += '\n'
file_obj.write(line)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('ckpt_path', type=str,
help='Path to model checkpoint file')
parser.add_argument('--config-path', type=str, default=None,
help='Path to config file')
parser = pl.Trainer.add_argparse_args(parser)
args = parser.parse_args()
args_dict = vars(args)
config = load_config(args_dict.pop('config_path'))
ckpt_path = args_dict.pop('ckpt_path')
predict(config, args_dict, ckpt_path)
if __name__ == '__main__':
main() | 0.721351 | 0.395835 |
import pulp
import random
import time
import matplotlib.pyplot as plt
'''
้ๅ่ฆ็้ฎ้ข๏ผ
โข ่พๅ
ฅ: ๆ้้ X, X ็ๅญ้ๅๆ F, X=โช๐โ๐น S
โข ่พๅบ: CโF, ๆปก่ถณ
๏ผ1๏ผX=โช๐โ๐ถ S
๏ผ2๏ผC ๆฏๆปก่ถณๆกไปถ(1)็ๆๅฐ้ๆ, ๅณ|C|ๆๅฐ.
'''
# ็ๆๅฎ้ชๆฐๆฎ
class generate():
def __init__(self):
self.X = [] # ๆ้้
self.F = [] # ๅญ้ๅๆ
def generateData(self, size):
X = set(list(range(size))) # 0~n-1
self.X = X
S = [random.randint(0, size - 1) for i in range(20)] # ้ๆบ้Xไธญ็ 20 ไธช็นๆพๅ
ฅS0
S = set(S)
self.F.append(list(S))
# union_s่กจ็คบโชSj,rest่กจ็คบX-union_s
union_s = S
rest = X - union_s
while len(rest) >= 20:
n = random.randint(1, 20)
x = random.randint(1, n)
S = set(random.sample(rest, x))
S.update(random.sample(union_s, n - x))
union_s.update(S) # ๆดๆฐๅทฒ็ป่ขซ้ๆฉ็ๆฐๆฎ้ๅ
rest = X - union_s
self.F.append(list(S))
# ๅฐไบ20ๆถ็ดๆฅๅ ๅ
ฅ
if len(rest) > 0:
self.F.append(list(rest))
# ็ๆ|F|-yไธช้ๆบ้ๅ
y = len(self.F)
for i in range(size - y):
n = random.randint(1, 20)
S = random.sample(X, n)
self.F.append(list(S))
for i in range(len(self.F)):
self.F[i] = set(self.F[i])
class set_coverage:
"""Set coverage"""
def __init__(self,):
"""Constructor for set_coverage"""
def greedy(self, X, F) -> list:
print(F)
U = set(X)
C = []
while U:
# ่ดชๅฟ็ญ็ฅ:ๆฏๆฌก้ๆฉ่ฆ็Uไธญๅ
็ด ๆๅค็้ๅๅ ๅ
ฅๅฐCไธญ๏ผ
S = max(F,key=(lambda x:len(U.intersection(x))))
U -= S
C.append(S)
return C
def liner_programming(self, X:list, F) -> list:
# ๅ ๆ้ๅ่ฆ็๏ผ่ๅ
ฅ
# xs = {0,1}
X = list(X)
A = [] # ็ณปๆฐ็ฉ้ต
for i in range(len(X)):
row = []
for j in range(len(F)): # eๅฑไบX๏ผ eๅฑไบSi
if X[i] in F[j]:
row.append(1)
else:
row.append(0)
A.append(row)
f = max([sum(r) for r in A]) # ็ป่ฎกX็ๅ
็ด ๅจFไธญ็ๆๅคง้ข็,
t = 1 / f
# ๆๅปบ็บฟๆงๆน็จ
prob = pulp.LpProblem("Linear minimize problem", pulp.LpMinimize)
# ๆทปๅ ๅ้ x_num
ingredient_vars = pulp.LpVariable.dicts("x", X, lowBound=0, upBound=1, cat="Continuous")
# ๆทปๅ ็ฎๆ ๅฝๆฐ
prob += pulp.lpSum([1 * ingredient_vars[i] for i in X])
# ๆทปๅ ็บฆๆ
for i in range(len(X)):
prob += pulp.lpSum([A[i][j] * ingredient_vars[j] for j in range(len(F))]) >= 1
prob.solve()
prob = prob.variables()
# ๆ็
ง็ฎๆ ๆน็จๆๅบ
prob = sorted(prob, key=lambda x: int(x.name[2:]))
# ๆ็
ง้ๅผ่ฟ่ก่ๅ
ฅ
C = [set(f) for i, f in enumerate(F) if prob[i].varValue > t]
return C
if __name__ == "__main__":
time_lp = []
time_greedy = []
def write(time1:list, time2:list, filename='a.csv'):
with open(filename, "w+") as f:
for i in range(len(time1)):
f.write(str(time1[i]) + ',' + str(time2[i]) + "\n")
def read(time1, time2, filename='a.csv'):
for line in open(filename,"r"):
time1.append(float(line.split(",")[0]))
time2.append(float(line.split(",")[1]))
def draw(x,time1, time2):
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('็ปๆ')
plt.xlabel("ๆฐๆฎ้/ไธช") # xlabelใylabel๏ผๅๅซ่ฎพ็ฝฎXใY่ฝด็ๆ ้ขๆๅญใ
plt.ylabel("็ฎๆณ่ฟ่กๆถ้ด/ms")
plt.plot(x, time1, color='red', label='็บฟๆง่งๅ')
plt.plot(x, time2, color='blue', label='่ดชๅฟ')
for a,b,c in zip(x,time1,time2):
plt.text(a, b, "%f" % b , ha='center', va='bottom', fontsize=12)
plt.text(a, c, "%f" % c, ha='center', va='top', fontsize=12)
plt.legend()
plt.show()
datasize = [100, 1000, 5000]
# ่ฏปๆไปถ
read(time_lp, time_greedy)
draw(datasize, time_lp, time_greedy)
time_lp.clear()
time_greedy.clear()
for i in datasize:
g = generate()
g.generateData(i)
s = set_coverage()
time1 = time.time()
s.liner_programming(g.X, g.F)
time2 = time.time()
time_lp.append((time2 - time1)*1000)
print("็บฟๆง่งๅ๏ผ"+str(time_lp[-1]))
time1 = time.time()
s.greedy(g.X, g.F)
time2 = time.time()
time_greedy.append((time2 - time1) * 1000)
print("่ดชๅฟ๏ผ"+str(time_greedy[-1]))
print(time_lp)
print(time_greedy)
write(time_lp, time_greedy)
draw(datasize,time_lp, time_greedy)
# print (s.LP(g.rawData, g.data)) | src/ApproximateAlgorithm/SetCoverage.py | import pulp
import random
import time
import matplotlib.pyplot as plt
'''
้ๅ่ฆ็้ฎ้ข๏ผ
โข ่พๅ
ฅ: ๆ้้ X, X ็ๅญ้ๅๆ F, X=โช๐โ๐น S
โข ่พๅบ: CโF, ๆปก่ถณ
๏ผ1๏ผX=โช๐โ๐ถ S
๏ผ2๏ผC ๆฏๆปก่ถณๆกไปถ(1)็ๆๅฐ้ๆ, ๅณ|C|ๆๅฐ.
'''
# ็ๆๅฎ้ชๆฐๆฎ
class generate():
def __init__(self):
self.X = [] # ๆ้้
self.F = [] # ๅญ้ๅๆ
def generateData(self, size):
X = set(list(range(size))) # 0~n-1
self.X = X
S = [random.randint(0, size - 1) for i in range(20)] # ้ๆบ้Xไธญ็ 20 ไธช็นๆพๅ
ฅS0
S = set(S)
self.F.append(list(S))
# union_s่กจ็คบโชSj,rest่กจ็คบX-union_s
union_s = S
rest = X - union_s
while len(rest) >= 20:
n = random.randint(1, 20)
x = random.randint(1, n)
S = set(random.sample(rest, x))
S.update(random.sample(union_s, n - x))
union_s.update(S) # ๆดๆฐๅทฒ็ป่ขซ้ๆฉ็ๆฐๆฎ้ๅ
rest = X - union_s
self.F.append(list(S))
# ๅฐไบ20ๆถ็ดๆฅๅ ๅ
ฅ
if len(rest) > 0:
self.F.append(list(rest))
# ็ๆ|F|-yไธช้ๆบ้ๅ
y = len(self.F)
for i in range(size - y):
n = random.randint(1, 20)
S = random.sample(X, n)
self.F.append(list(S))
for i in range(len(self.F)):
self.F[i] = set(self.F[i])
class set_coverage:
"""Set coverage"""
def __init__(self,):
"""Constructor for set_coverage"""
def greedy(self, X, F) -> list:
print(F)
U = set(X)
C = []
while U:
# ่ดชๅฟ็ญ็ฅ:ๆฏๆฌก้ๆฉ่ฆ็Uไธญๅ
็ด ๆๅค็้ๅๅ ๅ
ฅๅฐCไธญ๏ผ
S = max(F,key=(lambda x:len(U.intersection(x))))
U -= S
C.append(S)
return C
def liner_programming(self, X:list, F) -> list:
# ๅ ๆ้ๅ่ฆ็๏ผ่ๅ
ฅ
# xs = {0,1}
X = list(X)
A = [] # ็ณปๆฐ็ฉ้ต
for i in range(len(X)):
row = []
for j in range(len(F)): # eๅฑไบX๏ผ eๅฑไบSi
if X[i] in F[j]:
row.append(1)
else:
row.append(0)
A.append(row)
f = max([sum(r) for r in A]) # ็ป่ฎกX็ๅ
็ด ๅจFไธญ็ๆๅคง้ข็,
t = 1 / f
# ๆๅปบ็บฟๆงๆน็จ
prob = pulp.LpProblem("Linear minimize problem", pulp.LpMinimize)
# ๆทปๅ ๅ้ x_num
ingredient_vars = pulp.LpVariable.dicts("x", X, lowBound=0, upBound=1, cat="Continuous")
# ๆทปๅ ็ฎๆ ๅฝๆฐ
prob += pulp.lpSum([1 * ingredient_vars[i] for i in X])
# ๆทปๅ ็บฆๆ
for i in range(len(X)):
prob += pulp.lpSum([A[i][j] * ingredient_vars[j] for j in range(len(F))]) >= 1
prob.solve()
prob = prob.variables()
# ๆ็
ง็ฎๆ ๆน็จๆๅบ
prob = sorted(prob, key=lambda x: int(x.name[2:]))
# ๆ็
ง้ๅผ่ฟ่ก่ๅ
ฅ
C = [set(f) for i, f in enumerate(F) if prob[i].varValue > t]
return C
if __name__ == "__main__":
time_lp = []
time_greedy = []
def write(time1:list, time2:list, filename='a.csv'):
with open(filename, "w+") as f:
for i in range(len(time1)):
f.write(str(time1[i]) + ',' + str(time2[i]) + "\n")
def read(time1, time2, filename='a.csv'):
for line in open(filename,"r"):
time1.append(float(line.split(",")[0]))
time2.append(float(line.split(",")[1]))
def draw(x,time1, time2):
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
plt.title('็ปๆ')
plt.xlabel("ๆฐๆฎ้/ไธช") # xlabelใylabel๏ผๅๅซ่ฎพ็ฝฎXใY่ฝด็ๆ ้ขๆๅญใ
plt.ylabel("็ฎๆณ่ฟ่กๆถ้ด/ms")
plt.plot(x, time1, color='red', label='็บฟๆง่งๅ')
plt.plot(x, time2, color='blue', label='่ดชๅฟ')
for a,b,c in zip(x,time1,time2):
plt.text(a, b, "%f" % b , ha='center', va='bottom', fontsize=12)
plt.text(a, c, "%f" % c, ha='center', va='top', fontsize=12)
plt.legend()
plt.show()
datasize = [100, 1000, 5000]
# ่ฏปๆไปถ
read(time_lp, time_greedy)
draw(datasize, time_lp, time_greedy)
time_lp.clear()
time_greedy.clear()
for i in datasize:
g = generate()
g.generateData(i)
s = set_coverage()
time1 = time.time()
s.liner_programming(g.X, g.F)
time2 = time.time()
time_lp.append((time2 - time1)*1000)
print("็บฟๆง่งๅ๏ผ"+str(time_lp[-1]))
time1 = time.time()
s.greedy(g.X, g.F)
time2 = time.time()
time_greedy.append((time2 - time1) * 1000)
print("่ดชๅฟ๏ผ"+str(time_greedy[-1]))
print(time_lp)
print(time_greedy)
write(time_lp, time_greedy)
draw(datasize,time_lp, time_greedy)
# print (s.LP(g.rawData, g.data)) | 0.0956 | 0.230432 |
import ctypes
import math
inputFilePath = "Input_Cases/Individual/sudoku_input_difficult.txt"
algoChoice = 1 # defaults to backtrack algo
SudokuSolverLib = ctypes.CDLL('./lib/SudokuSolverLib.so')
def readSudInput(filepath):
# helper function ,reads in .txt
sud_sizes = [el ** 2 for el in range(1, 17)] # square dimensions for sudokus
text = ""
sud_array = []
with open(filepath, "r") as file:
text = file.read()
for el in text:
if el.isdigit():
sud_array.append(int(el))
if len(sud_array) not in sud_sizes:
print(f"unexpected amount of sudoku entries, got {len(sud_array)}, expected a value in: \n {sud_sizes}")
quit("quitting program")
# calc dimensions
sud_dimension = round(math.sqrt(len(sud_array)))
# check if each number falls within this dimension, 0 means no value
pos = 0
for field in sud_array:
if not (0 <= int(field) <= int(sud_dimension)):
print(f"Unexpected value: {field} at pos {pos} \n")
quit(f"Expected a number between 0 and {sud_dimension} based on calculated dimensions. Quitting.")
pos += 1
return [sud_array, len(sud_array), sud_dimension]
def convertToCArray(array, elementCount):
# explicitly converts array to 32 bit ints
cArray= (ctypes.c_int32 * elementCount)(*array)
# * array accesses the contents of the list, space-seperated
return cArray
def convertToCParameters(filepath, algoChoice):
# create cArray
array, arr_len, sud_dimension = readSudInput(filepath)
cArray = convertToCArray(array, arr_len)
# create 32 bit c ints
def createCInts(*args):
converted_ints = []
for arg in args:
converted_ints.append(ctypes.c_int32(arg))
return converted_ints
size, dataDimension = [arr_len,sud_dimension]
cAlgoChoice, cSize, cDataDimension = createCInts(algoChoice, size, dataDimension)
return [cAlgoChoice, cSize, cDataDimension, cArray]
args = convertToCParameters(filepath=inputFilePath,algoChoice = algoChoice)
# int startSudoku(int algoChoice, int size, int dataDimension, int *sudokuArray){
# None, integers, bytes objects and (unicode) strings can be passed directly as parameters
# However, the types are passed explicitly to avoid potential bugs.
# The arguments should be provided positionally as in C!
SudokuSolverLib.startSudoku(*args)
# Figure out how to pass array
#inputFilename = "Input_Cases/Individual/sudoku_input_difficult.txt"
#algoChoice = 1 ## The default algorithm is backtracking
## size = 0 ## total amount of numbers
##dataDimension = 0 ## Length of one side of a sudoku
##sudokuArray ## unsolved sudokus are zero. Unfilled sudoku elements are null. Bug value is -1. | SudokuSolver.py | import ctypes
import math
inputFilePath = "Input_Cases/Individual/sudoku_input_difficult.txt"
algoChoice = 1 # defaults to backtrack algo
SudokuSolverLib = ctypes.CDLL('./lib/SudokuSolverLib.so')
def readSudInput(filepath):
# helper function ,reads in .txt
sud_sizes = [el ** 2 for el in range(1, 17)] # square dimensions for sudokus
text = ""
sud_array = []
with open(filepath, "r") as file:
text = file.read()
for el in text:
if el.isdigit():
sud_array.append(int(el))
if len(sud_array) not in sud_sizes:
print(f"unexpected amount of sudoku entries, got {len(sud_array)}, expected a value in: \n {sud_sizes}")
quit("quitting program")
# calc dimensions
sud_dimension = round(math.sqrt(len(sud_array)))
# check if each number falls within this dimension, 0 means no value
pos = 0
for field in sud_array:
if not (0 <= int(field) <= int(sud_dimension)):
print(f"Unexpected value: {field} at pos {pos} \n")
quit(f"Expected a number between 0 and {sud_dimension} based on calculated dimensions. Quitting.")
pos += 1
return [sud_array, len(sud_array), sud_dimension]
def convertToCArray(array, elementCount):
# explicitly converts array to 32 bit ints
cArray= (ctypes.c_int32 * elementCount)(*array)
# * array accesses the contents of the list, space-seperated
return cArray
def convertToCParameters(filepath, algoChoice):
# create cArray
array, arr_len, sud_dimension = readSudInput(filepath)
cArray = convertToCArray(array, arr_len)
# create 32 bit c ints
def createCInts(*args):
converted_ints = []
for arg in args:
converted_ints.append(ctypes.c_int32(arg))
return converted_ints
size, dataDimension = [arr_len,sud_dimension]
cAlgoChoice, cSize, cDataDimension = createCInts(algoChoice, size, dataDimension)
return [cAlgoChoice, cSize, cDataDimension, cArray]
args = convertToCParameters(filepath=inputFilePath,algoChoice = algoChoice)
# int startSudoku(int algoChoice, int size, int dataDimension, int *sudokuArray){
# None, integers, bytes objects and (unicode) strings can be passed directly as parameters
# However, the types are passed explicitly to avoid potential bugs.
# The arguments should be provided positionally as in C!
SudokuSolverLib.startSudoku(*args)
# Figure out how to pass array
#inputFilename = "Input_Cases/Individual/sudoku_input_difficult.txt"
#algoChoice = 1 ## The default algorithm is backtracking
## size = 0 ## total amount of numbers
##dataDimension = 0 ## Length of one side of a sudoku
##sudokuArray ## unsolved sudokus are zero. Unfilled sudoku elements are null. Bug value is -1. | 0.398524 | 0.387314 |
from jcourse_api.models import *
def create_test_env() -> None:
dept_seiee = Department.objects.create(name='SEIEE')
dept_phy = Department.objects.create(name='PHYSICS')
teacher_gao = Teacher.objects.create(tid=1, name='้ซๅฅณๅฃซ', department=dept_seiee, title='ๆๆ', pinyin='gaoxiaofeng',
abbr_pinyin='gxf')
teacher_pan = Teacher.objects.create(tid=4, name='ๆฝ่ๅธ', department=dept_seiee, title='ๆๆ', pinyin='panli',
abbr_pinyin='pl')
teacher_liang = Teacher.objects.create(tid=2, name='ๆขๅฅณๅฃซ', department=dept_phy, pinyin='liangqin', abbr_pinyin='lq')
teacher_zhao = Teacher.objects.create(tid=3, name='่ตตๅ
็', department=dept_phy, title='่ฎฒๅธ', pinyin='zhaohao',
abbr_pinyin='zh')
category = Category.objects.create(name='้่ฏ')
c1 = Course.objects.create(code='CS2500', name='็ฎๆณไธๅคๆๆง', credit=2, department=dept_seiee,
main_teacher=teacher_gao)
c1.teacher_group.add(teacher_gao)
c2 = Course.objects.create(code='CS1500', name='่ฎก็ฎๆบ็งๅญฆๅฏผ่ฎบ', credit=4, department=dept_seiee,
main_teacher=teacher_gao)
c2.teacher_group.add(teacher_gao)
c2.teacher_group.add(teacher_pan)
c3 = Course.objects.create(code='MARX1001', name='ๆๆณ้ๅพทไฟฎๅ
ปไธๆณๅพๅบ็ก', credit=3, department=dept_phy,
main_teacher=teacher_liang, category=category)
c3.teacher_group.add(teacher_liang)
c4 = Course.objects.create(code='MARX1001', name='ๆๆณ้ๅพทไฟฎๅ
ปไธๆณๅพๅบ็ก', credit=3, department=dept_phy,
main_teacher=teacher_zhao, category=category)
c4.teacher_group.add(teacher_zhao)
FormerCode.objects.create(old_code='CS250', new_code='CS2500')
FormerCode.objects.create(old_code='CS251', new_code='CS2500')
FormerCode.objects.create(old_code='CS150', new_code='CS1500')
FormerCode.objects.create(old_code='TH000', new_code='MARX1001')
Semester.objects.create(name='2021-2022-1')
Semester.objects.create(name='2021-2022-2')
Semester.objects.create(name='2021-2022-3')
User.objects.create(username='test')
def create_review(username: str = 'test', code: str = 'CS1500', rating: int = 3) -> Review:
user, _ = User.objects.get_or_create(username=username)
course = Course.objects.get(code=code)
review = Review.objects.create(user=user, course=course, comment='TEST', rating=rating, score='W',
semester=Semester.objects.get(name='2021-2022-1'))
Action.objects.create(review=review, user=user, action=1)
return review | jcourse_api/tests/__init__.py | from jcourse_api.models import *
def create_test_env() -> None:
dept_seiee = Department.objects.create(name='SEIEE')
dept_phy = Department.objects.create(name='PHYSICS')
teacher_gao = Teacher.objects.create(tid=1, name='้ซๅฅณๅฃซ', department=dept_seiee, title='ๆๆ', pinyin='gaoxiaofeng',
abbr_pinyin='gxf')
teacher_pan = Teacher.objects.create(tid=4, name='ๆฝ่ๅธ', department=dept_seiee, title='ๆๆ', pinyin='panli',
abbr_pinyin='pl')
teacher_liang = Teacher.objects.create(tid=2, name='ๆขๅฅณๅฃซ', department=dept_phy, pinyin='liangqin', abbr_pinyin='lq')
teacher_zhao = Teacher.objects.create(tid=3, name='่ตตๅ
็', department=dept_phy, title='่ฎฒๅธ', pinyin='zhaohao',
abbr_pinyin='zh')
category = Category.objects.create(name='้่ฏ')
c1 = Course.objects.create(code='CS2500', name='็ฎๆณไธๅคๆๆง', credit=2, department=dept_seiee,
main_teacher=teacher_gao)
c1.teacher_group.add(teacher_gao)
c2 = Course.objects.create(code='CS1500', name='่ฎก็ฎๆบ็งๅญฆๅฏผ่ฎบ', credit=4, department=dept_seiee,
main_teacher=teacher_gao)
c2.teacher_group.add(teacher_gao)
c2.teacher_group.add(teacher_pan)
c3 = Course.objects.create(code='MARX1001', name='ๆๆณ้ๅพทไฟฎๅ
ปไธๆณๅพๅบ็ก', credit=3, department=dept_phy,
main_teacher=teacher_liang, category=category)
c3.teacher_group.add(teacher_liang)
c4 = Course.objects.create(code='MARX1001', name='ๆๆณ้ๅพทไฟฎๅ
ปไธๆณๅพๅบ็ก', credit=3, department=dept_phy,
main_teacher=teacher_zhao, category=category)
c4.teacher_group.add(teacher_zhao)
FormerCode.objects.create(old_code='CS250', new_code='CS2500')
FormerCode.objects.create(old_code='CS251', new_code='CS2500')
FormerCode.objects.create(old_code='CS150', new_code='CS1500')
FormerCode.objects.create(old_code='TH000', new_code='MARX1001')
Semester.objects.create(name='2021-2022-1')
Semester.objects.create(name='2021-2022-2')
Semester.objects.create(name='2021-2022-3')
User.objects.create(username='test')
def create_review(username: str = 'test', code: str = 'CS1500', rating: int = 3) -> Review:
user, _ = User.objects.get_or_create(username=username)
course = Course.objects.get(code=code)
review = Review.objects.create(user=user, course=course, comment='TEST', rating=rating, score='W',
semester=Semester.objects.get(name='2021-2022-1'))
Action.objects.create(review=review, user=user, action=1)
return review | 0.287968 | 0.190178 |
_base_ = [
'../_base_/datasets/scannet-3d-18class.py', '../_base_/models/votenet.py', '../_base_/default_runtime.py'
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2
)
model = dict(
backbone=dict(
_delete_=True,
type='Pointformer',
num_points=(2048, 1024, 512, 256),
radius=(0.2, 0.4, 0.8, 1.2),
num_samples=(64, 32, 16, 16),
basic_channels=64,
fp_channels=((256, 256), (256, 256)),
num_heads=8,
num_layers=2,
ratios=(1, 1, 1, 1),
use_decoder=(False, False, False, False),
use_lin_enc=False,
cloud_points=40000,
global_drop=0.2,
decoder_drop=0.0,
prenorm=True,
norm_cfg=dict(type='BN2d')),
bbox_head=dict(
num_classes=18,
bbox_coder=dict(
type='PartialBinBasedBBoxCoder',
num_sizes=18,
num_dir_bins=24,
with_rot=False,
mean_sizes=[[0.76966727, 0.8116021, 0.92573744],
[1.876858, 1.8425595, 1.1931566],
[0.61328, 0.6148609, 0.7182701],
[1.3955007, 1.5121545, 0.83443564],
[0.97949594, 1.0675149, 0.6329687],
[0.531663, 0.5955577, 1.7500148],
[0.9624706, 0.72462326, 1.1481868],
[0.83221924, 1.0490936, 1.6875663],
[0.21132214, 0.4206159, 0.5372846],
[1.4440073, 1.8970833, 0.26985747],
[1.0294262, 1.4040797, 0.87554324],
[1.3766412, 0.65521795, 1.6813129],
[0.6650819, 0.71111923, 1.298853],
[0.41999173, 0.37906948, 1.7513971],
[0.59359556, 0.5912492, 0.73919016],
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]
),
# keypoint_contrastive_loss=dict(
# type='SupConLoss',
# )
)
)
optimizer = dict(type='AdamW', lr=2e-3, weight_decay=1e-1)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
lr_config = dict(policy='step', warmup=None, step=[32, 40], gamma=0.3)
total_epochs = 48 | configs/pointformer/votenet_ptr_scannet-3d-18class.py | _base_ = [
'../_base_/datasets/scannet-3d-18class.py', '../_base_/models/votenet.py', '../_base_/default_runtime.py'
]
data = dict(
samples_per_gpu=2,
workers_per_gpu=2
)
model = dict(
backbone=dict(
_delete_=True,
type='Pointformer',
num_points=(2048, 1024, 512, 256),
radius=(0.2, 0.4, 0.8, 1.2),
num_samples=(64, 32, 16, 16),
basic_channels=64,
fp_channels=((256, 256), (256, 256)),
num_heads=8,
num_layers=2,
ratios=(1, 1, 1, 1),
use_decoder=(False, False, False, False),
use_lin_enc=False,
cloud_points=40000,
global_drop=0.2,
decoder_drop=0.0,
prenorm=True,
norm_cfg=dict(type='BN2d')),
bbox_head=dict(
num_classes=18,
bbox_coder=dict(
type='PartialBinBasedBBoxCoder',
num_sizes=18,
num_dir_bins=24,
with_rot=False,
mean_sizes=[[0.76966727, 0.8116021, 0.92573744],
[1.876858, 1.8425595, 1.1931566],
[0.61328, 0.6148609, 0.7182701],
[1.3955007, 1.5121545, 0.83443564],
[0.97949594, 1.0675149, 0.6329687],
[0.531663, 0.5955577, 1.7500148],
[0.9624706, 0.72462326, 1.1481868],
[0.83221924, 1.0490936, 1.6875663],
[0.21132214, 0.4206159, 0.5372846],
[1.4440073, 1.8970833, 0.26985747],
[1.0294262, 1.4040797, 0.87554324],
[1.3766412, 0.65521795, 1.6813129],
[0.6650819, 0.71111923, 1.298853],
[0.41999173, 0.37906948, 1.7513971],
[0.59359556, 0.5912492, 0.73919016],
[0.50867593, 0.50656086, 0.30136237],
[1.1511526, 1.0546296, 0.49706793],
[0.47535285, 0.49249494, 0.5802117]]
),
# keypoint_contrastive_loss=dict(
# type='SupConLoss',
# )
)
)
optimizer = dict(type='AdamW', lr=2e-3, weight_decay=1e-1)
optimizer_config = dict(grad_clip=dict(max_norm=10, norm_type=2))
lr_config = dict(policy='step', warmup=None, step=[32, 40], gamma=0.3)
total_epochs = 48 | 0.395601 | 0.244352 |
import csv
import re
import dateutil.parser
import scrape_util
from urllib.request import Request, urlopen
from sys import argv
from bs4 import BeautifulSoup
default_sale, base_url, prefix = scrape_util.get_market(argv)
strip_char = ';,. \n\t'
def get_sale_date(date_head):
"""Return the date of the livestock sale."""
date_string = date_head[0]
report_date = date_string.split(",")
reportmd = report_date[0].split()[-2:]
reportmd.append(report_date[-1])
date_string = str(reportmd)
sale_date = dateutil.parser.parse(date_string, fuzzy = True)
return sale_date
def get_sale_head(date_head):
"""Return the date of the livestock sale."""
head_string = date_head[-1].replace("\n","").strip()
return head_string
def is_sale(line):
line = [this_col for this_col in line if this_col]
has_price = re.search(r'\$[0-9]+', line[-1])
return bool(has_price) and len(line)==4
def get_sale_location(location):
if ',' in location:
sale_location = location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [location, '']
return sale_location
def get_sale(line):
sale_location = get_sale_location(line[0])
sale = {
'consignor_city': sale_location[0].title(),
'consignor_state': sale_location[1],
'cattle_avg_weight': re.sub(r'[^0-9\.]', '', line[2]),
'cattle_price_cwt': re.sub(r'[^0-9\.]', '', line[3]),
}
match = re.match(r'([0-9]+)\s(.*)', line[1])
if match:
sale['cattle_head'] = match.group(1)
sale['cattle_cattle'] = match.group(2)
sale = {k: v for k, v in sale.items() if v}
return sale
def write_sale(line, default_sale, writer):
for this_line in line:
if is_sale(this_line):
sale = default_sale.copy()
sale.update(get_sale(this_line))
writer.writerow(sale)
def main():
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
report = ['/repsales.php']
for this_report in report:
# Download auxillary information
request = Request(
base_url + '/comm.php',
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
table = soup.find_all("table")
commentary = table[1].tr.td.get_text()
date_and_head = commentary.split(":")
sale_date = get_sale_date(date_and_head)
io_name = archive.new_csv(sale_date)
#Stop iteration if this report is already archived
if not io_name:
break
# Initialize the default sale dictionary
sale_head = get_sale_head(date_and_head)
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
'sale_head': sale_head,
})
# Download report
request = Request(
base_url + this_report,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
table = soup.find_all('table')
line = [[td.get_text() for td in tr.find_all('td')] for tr in table[1].find_all('tr')]
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main() | _307_scrape.py | import csv
import re
import dateutil.parser
import scrape_util
from urllib.request import Request, urlopen
from sys import argv
from bs4 import BeautifulSoup
default_sale, base_url, prefix = scrape_util.get_market(argv)
strip_char = ';,. \n\t'
def get_sale_date(date_head):
"""Return the date of the livestock sale."""
date_string = date_head[0]
report_date = date_string.split(",")
reportmd = report_date[0].split()[-2:]
reportmd.append(report_date[-1])
date_string = str(reportmd)
sale_date = dateutil.parser.parse(date_string, fuzzy = True)
return sale_date
def get_sale_head(date_head):
"""Return the date of the livestock sale."""
head_string = date_head[-1].replace("\n","").strip()
return head_string
def is_sale(line):
line = [this_col for this_col in line if this_col]
has_price = re.search(r'\$[0-9]+', line[-1])
return bool(has_price) and len(line)==4
def get_sale_location(location):
if ',' in location:
sale_location = location.split(',')
else:
match = re.search(r'(.*?)(' + scrape_util.state + ')', location)
if match:
sale_location = [match.group(1), match.group(2)]
else:
sale_location = [location, '']
return sale_location
def get_sale(line):
sale_location = get_sale_location(line[0])
sale = {
'consignor_city': sale_location[0].title(),
'consignor_state': sale_location[1],
'cattle_avg_weight': re.sub(r'[^0-9\.]', '', line[2]),
'cattle_price_cwt': re.sub(r'[^0-9\.]', '', line[3]),
}
match = re.match(r'([0-9]+)\s(.*)', line[1])
if match:
sale['cattle_head'] = match.group(1)
sale['cattle_cattle'] = match.group(2)
sale = {k: v for k, v in sale.items() if v}
return sale
def write_sale(line, default_sale, writer):
for this_line in line:
if is_sale(this_line):
sale = default_sale.copy()
sale.update(get_sale(this_line))
writer.writerow(sale)
def main():
# Locate existing CSV files
archive = scrape_util.ArchiveFolder(argv, prefix)
report = ['/repsales.php']
for this_report in report:
# Download auxillary information
request = Request(
base_url + '/comm.php',
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
table = soup.find_all("table")
commentary = table[1].tr.td.get_text()
date_and_head = commentary.split(":")
sale_date = get_sale_date(date_and_head)
io_name = archive.new_csv(sale_date)
#Stop iteration if this report is already archived
if not io_name:
break
# Initialize the default sale dictionary
sale_head = get_sale_head(date_and_head)
this_default_sale = default_sale.copy()
this_default_sale.update({
'sale_year': sale_date.year,
'sale_month': sale_date.month,
'sale_day': sale_date.day,
'sale_head': sale_head,
})
# Download report
request = Request(
base_url + this_report,
headers = scrape_util.url_header,
)
with urlopen(request) as io:
soup = BeautifulSoup(io.read(), 'lxml')
table = soup.find_all('table')
line = [[td.get_text() for td in tr.find_all('td')] for tr in table[1].find_all('tr')]
# Open a new CSV file and write each sale
with io_name.open('w', encoding='utf-8') as io:
writer = csv.DictWriter(io, scrape_util.header, lineterminator='\n')
writer.writeheader()
write_sale(line, this_default_sale, writer)
if __name__ == '__main__':
main() | 0.414899 | 0.117851 |
from abc import ABC, abstractmethod
class Environment(ABC):
"""
A reinforcement learning Environment.
In reinforcement learning, an Agent learns by interacting with an Environment.
An Environment defines the dynamics of a particular problem:
the states, the actions, the transitions between states, and the rewards given to the agent.
Environments are often used to benchmark reinforcement learning agents,
or to define real problems that the user hopes to solve using reinforcement learning.
"""
@property
@abstractmethod
def name(self):
"""
The name of the environment.
"""
@abstractmethod
def reset(self):
"""
Reset the environment and return a new intial state.
Returns
-------
State
The initial state for the next episode.
"""
@abstractmethod
def step(self, action):
"""
Apply an action and get the next state.
Parameters
----------
action : Action
The action to apply at the current time step.
Returns
-------
all.environments.State
The State of the environment after the action is applied.
This State object includes both the done flag and any additional "info"
float
The reward achieved by the previous action
"""
@abstractmethod
def render(self, **kwargs):
"""
Render the current environment state.
"""
@abstractmethod
def close(self):
"""
Clean up any extraneaous environment objects.
"""
@property
@abstractmethod
def state(self):
"""
The State of the Environment at the current timestep.
"""
@property
@abstractmethod
def state_space(self):
"""
The Space representing the range of observable states.
Returns
-------
Space
An object of type Space that represents possible states the agent may observe
"""
@property
def observation_space(self):
"""
Alias for Environemnt.state_space.
Returns
-------
Space
An object of type Space that represents possible states the agent may observe
"""
return self.state_space
@property
@abstractmethod
def action_space(self):
"""
The Space representing the range of possible actions.
Returns
-------
Space
An object of type Space that represents possible actions the agent may take
"""
@abstractmethod
def duplicate(self, n):
"""
Create n copies of this environment.
"""
@property
@abstractmethod
def device(self):
"""
The torch device the environment lives on.
""" | all/environments/abstract.py | from abc import ABC, abstractmethod
class Environment(ABC):
"""
A reinforcement learning Environment.
In reinforcement learning, an Agent learns by interacting with an Environment.
An Environment defines the dynamics of a particular problem:
the states, the actions, the transitions between states, and the rewards given to the agent.
Environments are often used to benchmark reinforcement learning agents,
or to define real problems that the user hopes to solve using reinforcement learning.
"""
@property
@abstractmethod
def name(self):
"""
The name of the environment.
"""
@abstractmethod
def reset(self):
"""
Reset the environment and return a new intial state.
Returns
-------
State
The initial state for the next episode.
"""
@abstractmethod
def step(self, action):
"""
Apply an action and get the next state.
Parameters
----------
action : Action
The action to apply at the current time step.
Returns
-------
all.environments.State
The State of the environment after the action is applied.
This State object includes both the done flag and any additional "info"
float
The reward achieved by the previous action
"""
@abstractmethod
def render(self, **kwargs):
"""
Render the current environment state.
"""
@abstractmethod
def close(self):
"""
Clean up any extraneaous environment objects.
"""
@property
@abstractmethod
def state(self):
"""
The State of the Environment at the current timestep.
"""
@property
@abstractmethod
def state_space(self):
"""
The Space representing the range of observable states.
Returns
-------
Space
An object of type Space that represents possible states the agent may observe
"""
@property
def observation_space(self):
"""
Alias for Environemnt.state_space.
Returns
-------
Space
An object of type Space that represents possible states the agent may observe
"""
return self.state_space
@property
@abstractmethod
def action_space(self):
"""
The Space representing the range of possible actions.
Returns
-------
Space
An object of type Space that represents possible actions the agent may take
"""
@abstractmethod
def duplicate(self, n):
"""
Create n copies of this environment.
"""
@property
@abstractmethod
def device(self):
"""
The torch device the environment lives on.
""" | 0.949867 | 0.887984 |
from coremltools.converters.mil.mil.passes.quantization_passes import AbstractQuantizationPass
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
import logging as _logging
from coremltools.converters._profile_utils import _profile
from tqdm import tqdm as _tqdm
from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import PassContainer
@_profile
def apply_common_pass_pipeline(prog, passes):
def _apply(passes, name="common"):
if len(passes) == 0:
return
_logging.debug("Program before {} passes:\n{}".format(name, prog))
prog.validate()
s = 'passes' if len(passes) > 1 else 'pass'
for p in _tqdm(passes, desc="Running MIL {} {}".format(name, s), unit=" passes"):
_logging.info('Performing pass: "{}"'.format(p))
PASS_REGISTRY[p](prog) if not isinstance(p, AbstractQuantizationPass) else p.apply(prog)
if isinstance(p, AbstractQuantizationPass) or not isinstance(PASS_REGISTRY[p], PassContainer):
prog.validate()
_logging.debug("Program after {} passes:\n{}".format(name, prog))
return
common_passes = [
"common::cast_optimization",
"common::const_elimination",
"common::sanitize_input_output_names",
"common::divide_to_multiply",
"common::add_conv_transpose_output_shape",
"common::const_elimination",
"common::loop_invariant_elimination",
"common::remove_symbolic_reshape",
'common::noop_elimination',
"common::fuse_matmul_weight_bias",
"common::fuse_linear_bias",
"common::fuse_gelu_tanh_approximation",
"common::fuse_gelu_exact",
"common::fuse_leaky_relu",
"common::rank0_expand_dims_swap",
"common::use_reflection_padding",
"common::merge_consecutive_paddings", # Should come after use_reflection_padding, which will introduce new padding layers
"common::pad_conv_connect", # Should come after merge_consecutive_paddings
'common::image_input_preprocess',
"common::replace_stack_reshape", # should come before detect_concat_interleave since it may add concat
"common::reduce_transposes",
"common::fuse_conv_scale",
"common::fuse_conv_bias",
"common::fuse_onehot_matmul_to_gather",
"common::fuse_layernorm_or_instancenorm", # should come after reduce_transposes, to detect instance_norm
"common::fuse_elementwise_to_batchnorm", # should come after fuse_layernorm_or_instancenorm
"common::fuse_reduce_mean", # should come after fuse_layernorm_or_instancenorm
"common::fuse_conv_batchnorm", # should come after fuse_elementwise_to_batchnorm
"common::fuse_conv_scale", # Re-run the fuse conv scale pass after the conv and batch_norm are fused
"common::fuse_conv_bias", # Re-run the fuse conv bias pass after the conv and batch_norm are fused
"common::detect_concat_interleave",
"common::concat_to_pixel_shuffle", # should come after detect_concat_interleave and after replace_stack_reshape
"common::dead_code_elimination", # always end with dce
]
_apply(common_passes, name="Common")
for p in passes:
if isinstance(p, AbstractQuantizationPass):
_apply([p], type(p).__name__)
cleanup_passes = [
"common::cast_optimization",
"common::const_elimination",
"common::loop_invariant_elimination",
"common::noop_elimination",
"common::dedup_op_and_var_names",
"common::reduce_transposes", # fuse_layernorm_or_instancenorm can potentially adding transposes
"common::topological_reorder",
"common::dead_code_elimination", # always end with dce
]
_apply(cleanup_passes, name="Clean up") | coremltools/converters/mil/mil/passes/apply_common_pass_pipeline.py |
from coremltools.converters.mil.mil.passes.quantization_passes import AbstractQuantizationPass
from coremltools.converters.mil.mil.passes.pass_registry import PASS_REGISTRY
import logging as _logging
from coremltools.converters._profile_utils import _profile
from tqdm import tqdm as _tqdm
from coremltools.converters.mil.experimental.passes.generic_pass_infrastructure import PassContainer
@_profile
def apply_common_pass_pipeline(prog, passes):
def _apply(passes, name="common"):
if len(passes) == 0:
return
_logging.debug("Program before {} passes:\n{}".format(name, prog))
prog.validate()
s = 'passes' if len(passes) > 1 else 'pass'
for p in _tqdm(passes, desc="Running MIL {} {}".format(name, s), unit=" passes"):
_logging.info('Performing pass: "{}"'.format(p))
PASS_REGISTRY[p](prog) if not isinstance(p, AbstractQuantizationPass) else p.apply(prog)
if isinstance(p, AbstractQuantizationPass) or not isinstance(PASS_REGISTRY[p], PassContainer):
prog.validate()
_logging.debug("Program after {} passes:\n{}".format(name, prog))
return
common_passes = [
"common::cast_optimization",
"common::const_elimination",
"common::sanitize_input_output_names",
"common::divide_to_multiply",
"common::add_conv_transpose_output_shape",
"common::const_elimination",
"common::loop_invariant_elimination",
"common::remove_symbolic_reshape",
'common::noop_elimination',
"common::fuse_matmul_weight_bias",
"common::fuse_linear_bias",
"common::fuse_gelu_tanh_approximation",
"common::fuse_gelu_exact",
"common::fuse_leaky_relu",
"common::rank0_expand_dims_swap",
"common::use_reflection_padding",
"common::merge_consecutive_paddings", # Should come after use_reflection_padding, which will introduce new padding layers
"common::pad_conv_connect", # Should come after merge_consecutive_paddings
'common::image_input_preprocess',
"common::replace_stack_reshape", # should come before detect_concat_interleave since it may add concat
"common::reduce_transposes",
"common::fuse_conv_scale",
"common::fuse_conv_bias",
"common::fuse_onehot_matmul_to_gather",
"common::fuse_layernorm_or_instancenorm", # should come after reduce_transposes, to detect instance_norm
"common::fuse_elementwise_to_batchnorm", # should come after fuse_layernorm_or_instancenorm
"common::fuse_reduce_mean", # should come after fuse_layernorm_or_instancenorm
"common::fuse_conv_batchnorm", # should come after fuse_elementwise_to_batchnorm
"common::fuse_conv_scale", # Re-run the fuse conv scale pass after the conv and batch_norm are fused
"common::fuse_conv_bias", # Re-run the fuse conv bias pass after the conv and batch_norm are fused
"common::detect_concat_interleave",
"common::concat_to_pixel_shuffle", # should come after detect_concat_interleave and after replace_stack_reshape
"common::dead_code_elimination", # always end with dce
]
_apply(common_passes, name="Common")
for p in passes:
if isinstance(p, AbstractQuantizationPass):
_apply([p], type(p).__name__)
cleanup_passes = [
"common::cast_optimization",
"common::const_elimination",
"common::loop_invariant_elimination",
"common::noop_elimination",
"common::dedup_op_and_var_names",
"common::reduce_transposes", # fuse_layernorm_or_instancenorm can potentially adding transposes
"common::topological_reorder",
"common::dead_code_elimination", # always end with dce
]
_apply(cleanup_passes, name="Clean up") | 0.560493 | 0.274528 |
from csv import DictReader
MAX_GLOBAL_CLOCKS = 24
MAX_COLUMN_CLOCKS = 12
def gen_rclk_int(grid):
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type in ["RCLK_INT_L", "RCLK_INT_R"]:
yield loc
def walk_tile(grid, start_loc, dy, clocks):
key = (start_loc, dy)
assert key not in clocks
clocks[key] = set()
x, y = start_loc
while True:
y += dy
loc = (x, y)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type != 'INT':
break
left_gridinfo = grid.gridinfo_at_loc((x - 1, y))
for site, site_type in left_gridinfo.sites.items():
if site_type in ['SLICEL', 'SLICEM']:
clocks[key].add(site)
right_gridinfo = grid.gridinfo_at_loc((x + 1, y))
for site, site_type in right_gridinfo.sites.items():
if site_type in ['SLICEL', 'SLICEM']:
clocks[key].add(site)
def populate_leafs(grid):
clocks = {}
for rclk_tile_loc in gen_rclk_int(grid):
walk_tile(grid, rclk_tile_loc, 1, clocks)
walk_tile(grid, rclk_tile_loc, -1, clocks)
return clocks
class ClockColumns():
def __init__(self, grid):
self.sites = {}
self.clocks_active = {}
self.global_clocks = set()
clock_leafs = populate_leafs(grid)
for key, sites in clock_leafs.items():
self.clocks_active[key] = set()
for site in sites:
self.sites[site] = key
def columns(self):
return self.clocks_active.keys()
def remove_column(self, disabled_columns):
for key in disabled_columns:
del self.clocks_active[key]
sites_to_remove = set()
for site, key in self.sites.items():
if key in disabled_columns:
sites_to_remove.add(site)
for site in sites_to_remove:
del self.sites[site]
def add_clock(self, site, clock):
key = self.sites[site]
if clock in self.clocks_active[key]:
# Clock already in use!
return True
if len(self.clocks_active[key]) >= MAX_COLUMN_CLOCKS:
# No more column clocks!
return False
if clock not in self.global_clocks:
if len(self.global_clocks) >= MAX_GLOBAL_CLOCKS:
# No more global clocks!
return False
self.global_clocks.add(clock)
self.clocks_active[key].add(clock)
return True
class GlobalClockBuffers():
def __init__(self, bufg_outputs_file):
self.bufgs = {}
self.unused_bufgs = set()
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx] = []
self.unused_bufgs.add(idx)
with open(bufg_outputs_file) as f:
for bufg in DictReader(f):
if bufg['hroute_output'] == 'all':
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx].append(bufg['site'])
else:
self.bufgs[int(bufg['hroute_output'])].append(bufg['site'])
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx].sort()
def random_bufg_for_hroute(self, hroute_idx, random_choice):
self.unused_bufgs.remove(hroute_idx)
return random_choice(self.bufgs[hroute_idx]), hroute_idx
def random_bufg(self, random_choice):
hroute_idx = random_choice(sorted(self.unused_bufgs))
return self.random_bufg_for_hroute(hroute_idx, random_choice)
def make_bufg(site, site_type, idx, ce_inputs, randlib):
if site_type in ['BUFGCE', 'BUFGCE_HDIO']:
s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}")
) bufg_{idx} (
.CE({ce}),
.O(bufg_o_{idx})
);""".format(
loc=site,
idx=idx,
invert_ce=randlib.randint(2),
ce_type=randlib.choice(["SYNC", "ASYNC"]),
ce=randlib.choice(ce_inputs))
elif site_type == 'BUFGCE_DIV':
s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE_DIV #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}"),
.BUFGCE_DIVIDE({bufce_divide})
) bufg_{idx} (
.CE({ce}),
.CLR({clr}),
.O(bufg_o_{idx})
);""".format(
loc=site,
idx=idx,
invert_ce=randlib.randint(2),
ce_type=randlib.choice(["SYNC", "ASYNC"]),
ce=randlib.choice(ce_inputs),
clr=randlib.choice(ce_inputs),
bufce_divide=randlib.choice(range(1, 9)))
elif site_type == 'BUFG_PS':
s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFG_PS #(
) bufg_{idx} (
.O(bufg_o_{idx})
);""".format(
loc=site, idx=idx)
elif site_type == 'BUFGCTRL':
preselect_i0 = randlib.randint(2)
if not preselect_i0:
preselect_i1 = randlib.randint(2)
else:
preselect_i1 = 0
s0 = randlib.choice(ce_inputs)
s1 = randlib.choice(ce_inputs)
if s0 == '0':
while s1 == '0':
s1 = randlib.choice(ce_inputs)
if s0 == '0' and s1 == '1':
invert_s0 = randlib.randint(2)
invert_s1 = 0
elif s0 == '1' and s1 == '0':
invert_s1 = randlib.randint(2)
invert_s0 = 0
elif s0 == '1' and s1 == '1':
invert_s0 = randlib.randint(2)
if invert_s0:
invert_s1 = 0
else:
invert_s1 = randlib.randint(2)
else:
invert_s0 = randlib.randint(2)
invert_s1 = randlib.randint(2)
s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCTRL #(
.INIT_OUT({init_out}),
.IS_CE0_INVERTED({invert_ce0}),
.IS_CE1_INVERTED({invert_ce1}),
.IS_S0_INVERTED({invert_s0}),
.IS_S1_INVERTED({invert_s1}),
.IS_IGNORE0_INVERTED({invert_ignore0}),
.IS_IGNORE1_INVERTED({invert_ignore1}),
.PRESELECT_I0({preselect_i0}),
.PRESELECT_I1({preselect_i1})
) bufg_{idx} (
.IGNORE0({ignore0}),
.IGNORE1({ignore1}),
.S0({s0}),
.S1({s1}),
.CE0({ce0}),
.CE1({ce1}),
.O(bufg_o_{idx})
);""".format(
loc=site,
idx=idx,
init_out=randlib.randint(2),
s0=s0,
s1=s1,
ce0=randlib.choice(ce_inputs),
ce1=randlib.choice(ce_inputs),
ignore0=randlib.choice(ce_inputs),
ignore1=randlib.choice(ce_inputs),
invert_ce0=randlib.randint(2),
invert_ce1=randlib.randint(2),
invert_s0=invert_s0,
invert_s1=invert_s1,
invert_ignore0=randlib.randint(2),
invert_ignore1=randlib.randint(2),
preselect_i0=preselect_i0,
preselect_i1=preselect_i1,
)
else:
assert False, site_type
return s, 'bufg_o_{idx}'.format(idx=idx) | utils/clock_utils.py |
from csv import DictReader
MAX_GLOBAL_CLOCKS = 24
MAX_COLUMN_CLOCKS = 12
def gen_rclk_int(grid):
for tile_name in sorted(grid.tiles()):
loc = grid.loc_of_tilename(tile_name)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type in ["RCLK_INT_L", "RCLK_INT_R"]:
yield loc
def walk_tile(grid, start_loc, dy, clocks):
key = (start_loc, dy)
assert key not in clocks
clocks[key] = set()
x, y = start_loc
while True:
y += dy
loc = (x, y)
gridinfo = grid.gridinfo_at_loc(loc)
if gridinfo.tile_type != 'INT':
break
left_gridinfo = grid.gridinfo_at_loc((x - 1, y))
for site, site_type in left_gridinfo.sites.items():
if site_type in ['SLICEL', 'SLICEM']:
clocks[key].add(site)
right_gridinfo = grid.gridinfo_at_loc((x + 1, y))
for site, site_type in right_gridinfo.sites.items():
if site_type in ['SLICEL', 'SLICEM']:
clocks[key].add(site)
def populate_leafs(grid):
clocks = {}
for rclk_tile_loc in gen_rclk_int(grid):
walk_tile(grid, rclk_tile_loc, 1, clocks)
walk_tile(grid, rclk_tile_loc, -1, clocks)
return clocks
class ClockColumns():
def __init__(self, grid):
self.sites = {}
self.clocks_active = {}
self.global_clocks = set()
clock_leafs = populate_leafs(grid)
for key, sites in clock_leafs.items():
self.clocks_active[key] = set()
for site in sites:
self.sites[site] = key
def columns(self):
return self.clocks_active.keys()
def remove_column(self, disabled_columns):
for key in disabled_columns:
del self.clocks_active[key]
sites_to_remove = set()
for site, key in self.sites.items():
if key in disabled_columns:
sites_to_remove.add(site)
for site in sites_to_remove:
del self.sites[site]
def add_clock(self, site, clock):
key = self.sites[site]
if clock in self.clocks_active[key]:
# Clock already in use!
return True
if len(self.clocks_active[key]) >= MAX_COLUMN_CLOCKS:
# No more column clocks!
return False
if clock not in self.global_clocks:
if len(self.global_clocks) >= MAX_GLOBAL_CLOCKS:
# No more global clocks!
return False
self.global_clocks.add(clock)
self.clocks_active[key].add(clock)
return True
class GlobalClockBuffers():
def __init__(self, bufg_outputs_file):
self.bufgs = {}
self.unused_bufgs = set()
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx] = []
self.unused_bufgs.add(idx)
with open(bufg_outputs_file) as f:
for bufg in DictReader(f):
if bufg['hroute_output'] == 'all':
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx].append(bufg['site'])
else:
self.bufgs[int(bufg['hroute_output'])].append(bufg['site'])
for idx in range(MAX_GLOBAL_CLOCKS):
self.bufgs[idx].sort()
def random_bufg_for_hroute(self, hroute_idx, random_choice):
self.unused_bufgs.remove(hroute_idx)
return random_choice(self.bufgs[hroute_idx]), hroute_idx
def random_bufg(self, random_choice):
hroute_idx = random_choice(sorted(self.unused_bufgs))
return self.random_bufg_for_hroute(hroute_idx, random_choice)
def make_bufg(site, site_type, idx, ce_inputs, randlib):
    """Emit a Verilog instantiation for one global clock buffer at *site*.

    Randomizes the buffer's parameters and control inputs via *randlib*
    (drawing CE/CLR/select nets from *ce_inputs*) and returns a tuple of
    (verilog_source_text, output_wire_name).

    NOTE(review): the sequence of randlib calls is part of the behavior —
    the generated design depends on a reproducible random stream, so the
    call order below must not be changed.
    """
    if site_type in ['BUFGCE', 'BUFGCE_HDIO']:
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}")
) bufg_{idx} (
.CE({ce}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            invert_ce=randlib.randint(2),
            ce_type=randlib.choice(["SYNC", "ASYNC"]),
            ce=randlib.choice(ce_inputs))
    elif site_type == 'BUFGCE_DIV':
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCE_DIV #(
.IS_CE_INVERTED({invert_ce}),
.CE_TYPE("{ce_type}"),
.BUFGCE_DIVIDE({bufce_divide})
) bufg_{idx} (
.CE({ce}),
.CLR({clr}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            invert_ce=randlib.randint(2),
            ce_type=randlib.choice(["SYNC", "ASYNC"]),
            ce=randlib.choice(ce_inputs),
            clr=randlib.choice(ce_inputs),
            bufce_divide=randlib.choice(range(1, 9)))
    elif site_type == 'BUFG_PS':
        # BUFG_PS takes no randomized parameters here.
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFG_PS #(
) bufg_{idx} (
.O(bufg_o_{idx})
);""".format(
            loc=site, idx=idx)
    elif site_type == 'BUFGCTRL':
        # At most one of PRESELECT_I0 / PRESELECT_I1 is set to 1.
        preselect_i0 = randlib.randint(2)
        if not preselect_i0:
            preselect_i1 = randlib.randint(2)
        else:
            preselect_i1 = 0

        s0 = randlib.choice(ce_inputs)
        s1 = randlib.choice(ce_inputs)

        # If S0 is tied to constant 0, resample S1 until it is not also 0 —
        # presumably both selects constant-0 is an illegal/useless
        # configuration; TODO confirm against the BUFGCTRL primitive spec.
        if s0 == '0':
            while s1 == '0':
                s1 = randlib.choice(ce_inputs)

        # Pick select-pin inversion values consistent with constant select
        # inputs, so the effective (post-inversion) selects are not both 0.
        if s0 == '0' and s1 == '1':
            invert_s0 = randlib.randint(2)
            invert_s1 = 0
        elif s0 == '1' and s1 == '0':
            invert_s1 = randlib.randint(2)
            invert_s0 = 0
        elif s0 == '1' and s1 == '1':
            invert_s0 = randlib.randint(2)
            if invert_s0:
                invert_s1 = 0
            else:
                invert_s1 = randlib.randint(2)
        else:
            # At least one select is a real signal; invert freely.
            invert_s0 = randlib.randint(2)
            invert_s1 = randlib.randint(2)
        s = """
wire bufg_o_{idx};
(* LOC="{loc}", KEEP, DONT_TOUCH *) BUFGCTRL #(
.INIT_OUT({init_out}),
.IS_CE0_INVERTED({invert_ce0}),
.IS_CE1_INVERTED({invert_ce1}),
.IS_S0_INVERTED({invert_s0}),
.IS_S1_INVERTED({invert_s1}),
.IS_IGNORE0_INVERTED({invert_ignore0}),
.IS_IGNORE1_INVERTED({invert_ignore1}),
.PRESELECT_I0({preselect_i0}),
.PRESELECT_I1({preselect_i1})
) bufg_{idx} (
.IGNORE0({ignore0}),
.IGNORE1({ignore1}),
.S0({s0}),
.S1({s1}),
.CE0({ce0}),
.CE1({ce1}),
.O(bufg_o_{idx})
);""".format(
            loc=site,
            idx=idx,
            init_out=randlib.randint(2),
            s0=s0,
            s1=s1,
            ce0=randlib.choice(ce_inputs),
            ce1=randlib.choice(ce_inputs),
            ignore0=randlib.choice(ce_inputs),
            ignore1=randlib.choice(ce_inputs),
            invert_ce0=randlib.randint(2),
            invert_ce1=randlib.randint(2),
            invert_s0=invert_s0,
            invert_s1=invert_s1,
            invert_ignore0=randlib.randint(2),
            invert_ignore1=randlib.randint(2),
            preselect_i0=preselect_i0,
            preselect_i1=preselect_i1,
        )
    else:
        # Unknown buffer site type: fail loudly during generation.
        assert False, site_type

    return s, 'bufg_o_{idx}'.format(idx=idx)
import numpy as np
from random import randint
from PIL import Image
# Basic implementation of recursive division maze generation
# Credit to Wikipedia.org Maze Generation
# 1s will represent wall 0s will represent white space
# All positions are measured from the top left corner (0,0)
Debug = True


class Chamber:
    """A rectangular region of the maze located at (row, column) in the full
    grid, which can be divided by one horizontal and one vertical wall."""

    def __init__(self, array, row, column):
        self.array = array
        # Cached dimensions: x = number of rows, y = number of columns.
        self.x = array.shape[0]
        self.y = array.shape[1]
        if Debug:
            print(self.x, self.y)
        # Offset of this chamber's top-left corner in the full maze.
        self.row = row
        self.column = column

    def divide(self):
        """Draw one horizontal and one vertical wall, punch three gaps (per
        the recursive-division algorithm), and return the four resulting
        sub-chambers as new Chamber objects."""
        array = self.array
        # Wall positions, kept away from the chamber edges.
        x_line = randint(2, self.x - 2)
        y_line = randint(2, self.y - 2)
        if Debug:
            print("x_line " + str(x_line))
            print("y_line " + str(y_line))
        array[x_line, :] = 1
        array[:, y_line] = 1
        # Randomly choose which wall gets two gaps and which gets one, so
        # that three of the four wall segments are opened.
        if randint(0, 1) == 0:
            y_gap = randint(2, self.y - 2)
            x_gap1 = randint(2, self.x - 2)
            x_gap2 = randint(2, self.x - 2)
            for i in (x_gap1, x_gap2):
                array[i, y_line] = 0
            array[x_line, y_gap] = 0
        else:
            x_gap = randint(2, self.x - 2)
            y_gap1 = randint(2, self.y - 2)
            y_gap2 = randint(2, self.y - 2)
            for i in (y_gap1, y_gap2):
                array[x_line, i] = 0
            array[x_gap, y_line] = 0
        if Debug:
            # BUG FIX: print this chamber's array, not the module-level maze.
            print(array)

        # The walls occupy row x_line and column y_line, so the quadrants are
        # x_line rows tall above the wall and (self.x - x_line - 1) rows tall
        # below it; columns are analogous.
        inner_arrays = [
            # Top left quadrant
            Chamber(np.zeros((x_line, y_line)), self.row, self.column),
            # Top right quadrant
            Chamber(np.zeros((x_line, self.y - y_line - 1)),
                    self.row, self.column + y_line + 1),
            # Bottom left quadrant (BUG FIX: was self.x - x_line, which
            # included the wall row itself)
            Chamber(np.zeros((self.x - x_line - 1, y_line)),
                    self.row + x_line + 1, self.column),
            # Bottom right quadrant (BUG FIX: height was computed from self.y
            # instead of self.x)
            Chamber(np.zeros((self.x - x_line - 1, self.y - y_line - 1)),
                    self.row + x_line + 1, self.column + y_line + 1),
        ]
        if Debug:
            for i in inner_arrays:
                print(i.row, i.column)
                print(i.array)
        return inner_arrays
def create_maze(size):
    """Return a size x size boolean array of open cells (all False/0).

    BUG FIX: the original wrote np.zeros((size, size, dtype=bool)), placing
    the dtype keyword inside the shape tuple — a SyntaxError; dtype must be
    a keyword argument of np.zeros itself.
    """
    return np.zeros((size, size), dtype=bool)
def recurse(arraylist):
    """Divide each chamber that is still divisible (both dimensions > 2) one
    level deeper; chambers already at minimum size pass through unchanged.

    Returns a flat list of Chamber objects.
    NOTE: full recursion to a fixed point was drafted but is intentionally
    disabled for now — this performs a single division pass.
    """
    result = []
    for chamber in arraylist:
        if chamber.x > 2 and chamber.y > 2:
            # BUG FIX: the original appended the whole child list once per
            # child, producing nested lists; extend with the children instead.
            result.extend(chamber.divide())
        else:
            result.append(chamber)
    return result
def combine_arrays(maze, inner_arrays):
    """Copy each chamber's array back into *maze* at the chamber's offset.

    BUG FIX: the original used matrix.x - 1 / matrix.y - 1 as the copy
    extents, silently dropping the last row and column of every chamber.
    """
    for chamber in inner_arrays:
        sub = chamber.array
        r0, c0 = chamber.row, chamber.column
        for i in range(chamber.x):
            for j in range(chamber.y):
                maze[r0 + i][c0 + j] = sub[i][j]
    return maze
def create_boarder(array):
    """Mark the outermost ring of *array* as wall (1), in place.

    BUG FIX: the original ignored its parameter and wrote to the
    module-level `maze` (a Chamber, which is not subscriptable), and used
    size = 0 so the last row/column writes duplicated the first ones.
    """
    array[0, :] = 1
    array[:, 0] = 1
    array[-1, :] = 1
    array[:, -1] = 1
# --- Script entry: build one 20x20 maze chamber and perform a single
# division pass. Recursion, reassembly, and image export are drafted below
# but still disabled.
maze = Chamber(create_maze(20), 0, 0)
#print(maze.array)
inner_arrays = maze.divide()
# children = recurse(inner_arrays)
# print(children)
# new_maze = combine_arrays(maze.array, children)
# I need you to work on this image conversion:
#image = Image.fromarray(maze.array, '1')
#image.save('genmaze.png')
#image.show() | Oldcrap/generator.py | import numpy as np
from random import randint
from PIL import Image
# Basic implementation of recursive division maze generation
# Credit to Wikipedia.org Maze Generation
# 1s will represent wall 0s will represent white space
# All positions are measured from the top left corner (0,0)
Debug = True


class Chamber:
    """A rectangular region of the maze located at (row, column) in the full
    grid, which can be divided by one horizontal and one vertical wall."""

    def __init__(self, array, row, column):
        self.array = array
        # Cached dimensions: x = number of rows, y = number of columns.
        self.x = array.shape[0]
        self.y = array.shape[1]
        if Debug:
            print(self.x, self.y)
        # Offset of this chamber's top-left corner in the full maze.
        self.row = row
        self.column = column

    def divide(self):
        """Draw one horizontal and one vertical wall, punch three gaps (per
        the recursive-division algorithm), and return the four resulting
        sub-chambers as new Chamber objects."""
        array = self.array
        # Wall positions, kept away from the chamber edges.
        x_line = randint(2, self.x - 2)
        y_line = randint(2, self.y - 2)
        if Debug:
            print("x_line " + str(x_line))
            print("y_line " + str(y_line))
        array[x_line, :] = 1
        array[:, y_line] = 1
        # Randomly choose which wall gets two gaps and which gets one, so
        # that three of the four wall segments are opened.
        if randint(0, 1) == 0:
            y_gap = randint(2, self.y - 2)
            x_gap1 = randint(2, self.x - 2)
            x_gap2 = randint(2, self.x - 2)
            for i in (x_gap1, x_gap2):
                array[i, y_line] = 0
            array[x_line, y_gap] = 0
        else:
            x_gap = randint(2, self.x - 2)
            y_gap1 = randint(2, self.y - 2)
            y_gap2 = randint(2, self.y - 2)
            for i in (y_gap1, y_gap2):
                array[x_line, i] = 0
            array[x_gap, y_line] = 0
        if Debug:
            # BUG FIX: print this chamber's array, not the module-level maze.
            print(array)

        # The walls occupy row x_line and column y_line, so the quadrants are
        # x_line rows tall above the wall and (self.x - x_line - 1) rows tall
        # below it; columns are analogous.
        inner_arrays = [
            # Top left quadrant
            Chamber(np.zeros((x_line, y_line)), self.row, self.column),
            # Top right quadrant
            Chamber(np.zeros((x_line, self.y - y_line - 1)),
                    self.row, self.column + y_line + 1),
            # Bottom left quadrant (BUG FIX: was self.x - x_line, which
            # included the wall row itself)
            Chamber(np.zeros((self.x - x_line - 1, y_line)),
                    self.row + x_line + 1, self.column),
            # Bottom right quadrant (BUG FIX: height was computed from self.y
            # instead of self.x)
            Chamber(np.zeros((self.x - x_line - 1, self.y - y_line - 1)),
                    self.row + x_line + 1, self.column + y_line + 1),
        ]
        if Debug:
            for i in inner_arrays:
                print(i.row, i.column)
                print(i.array)
        return inner_arrays
def create_maze(size):
    """Return a size x size boolean array of open cells (all False/0).

    BUG FIX: the original wrote np.zeros((size, size, dtype=bool)), placing
    the dtype keyword inside the shape tuple — a SyntaxError; dtype must be
    a keyword argument of np.zeros itself.
    """
    return np.zeros((size, size), dtype=bool)
def recurse(arraylist):
    """Divide each chamber that is still divisible (both dimensions > 2) one
    level deeper; chambers already at minimum size pass through unchanged.

    Returns a flat list of Chamber objects.
    NOTE: full recursion to a fixed point was drafted but is intentionally
    disabled for now — this performs a single division pass.
    """
    result = []
    for chamber in arraylist:
        if chamber.x > 2 and chamber.y > 2:
            # BUG FIX: the original appended the whole child list once per
            # child, producing nested lists; extend with the children instead.
            result.extend(chamber.divide())
        else:
            result.append(chamber)
    return result
def combine_arrays(maze, inner_arrays):
    """Copy each chamber's array back into *maze* at the chamber's offset.

    BUG FIX: the original used matrix.x - 1 / matrix.y - 1 as the copy
    extents, silently dropping the last row and column of every chamber.
    """
    for chamber in inner_arrays:
        sub = chamber.array
        r0, c0 = chamber.row, chamber.column
        for i in range(chamber.x):
            for j in range(chamber.y):
                maze[r0 + i][c0 + j] = sub[i][j]
    return maze
def create_boarder(array):
    """Mark the outermost ring of *array* as wall (1), in place.

    BUG FIX: the original ignored its parameter and wrote to the
    module-level `maze` (a Chamber, which is not subscriptable), and used
    size = 0 so the last row/column writes duplicated the first ones.
    """
    array[0, :] = 1
    array[:, 0] = 1
    array[-1, :] = 1
    array[:, -1] = 1
# --- Script entry: build one 20x20 maze chamber and perform a single
# division pass. Recursion, reassembly, and image export are drafted below
# but still disabled.
maze = Chamber(create_maze(20), 0, 0)
#print(maze.array)
inner_arrays = maze.divide()
# children = recurse(inner_arrays)
# print(children)
# new_maze = combine_arrays(maze.array, children)
# I need you to work on this image conversion:
#image = Image.fromarray(maze.array, '1')
#image.save('genmaze.png')
#image.show()
import logging
from typing import Optional
from fastapi import HTTPException, Depends
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED
from sqlalchemy.exc import IntegrityError
from dispatch.plugins.base import plugins
from dispatch.config import (
DISPATCH_AUTHENTICATION_PROVIDER_SLUG,
DISPATCH_AUTHENTICATION_DEFAULT_USER,
)
from dispatch.organization import service as organization_service
from dispatch.project import service as project_service
from dispatch.enums import UserRoles
from .models import (
DispatchUser,
DispatchUserOrganization,
DispatchUserProject,
UserOrganization,
UserProject,
UserRegister,
UserUpdate,
)
log = logging.getLogger(__name__)
InvalidCredentialException = HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Could not validate credentials"
)
def get(*, db_session, user_id: int) -> Optional[DispatchUser]:
    """Returns a user based on the given user id."""
    query = db_session.query(DispatchUser).filter(DispatchUser.id == user_id)
    return query.one_or_none()
def get_by_email(*, db_session, email: str) -> Optional[DispatchUser]:
    """Returns a user object based on user email."""
    matching = db_session.query(DispatchUser).filter(DispatchUser.email == email)
    return matching.one_or_none()
def create_or_update_project_role(*, db_session, user: DispatchUser, role_in: UserProject):
    """Creates a new project role or updates an existing role."""
    # Resolve the project id, falling back to a lookup by name when the
    # incoming payload has no id.
    project_id = role_in.project.id
    if not project_id:
        project_id = project_service.get_by_name(
            db_session=db_session, name=role_in.project.name
        ).id

    existing_role = (
        db_session.query(DispatchUserProject)
        .filter(DispatchUserProject.dispatch_user_id == user.id)
        .filter(DispatchUserProject.project_id == project_id)
        .one_or_none()
    )

    if existing_role:
        existing_role.role = role_in.role
        return existing_role

    # No role yet for this user/project pair: build a fresh association.
    return DispatchUserProject(project_id=project_id, role=role_in.role)
def create_or_update_organization_role(
    *, db_session, user: DispatchUser, role_in: UserOrganization
):
    """Creates a new organization role or updates an existing role.

    Resolves the organization by name when the incoming payload carries no
    id, mirroring create_or_update_project_role above.
    """
    if not role_in.organization.id:
        organization = organization_service.get_by_name(
            db_session=db_session, name=role_in.organization.name
        )
        organization_id = organization.id
    else:
        organization_id = role_in.organization.id

    organization_role = (
        db_session.query(DispatchUserOrganization)
        .filter(
            DispatchUserOrganization.dispatch_user_id == user.id,
        )
        .filter(DispatchUserOrganization.organization_id == organization_id)
        .one_or_none()
    )

    if not organization_role:
        # BUG FIX: use the resolved organization_id. The local `organization`
        # variable is only bound in the lookup-by-name branch above, so the
        # original `organization_id=organization.id` raised NameError whenever
        # the payload already carried an id.
        return DispatchUserOrganization(
            organization_id=organization_id,
            role=role_in.role,
        )

    organization_role.role = role_in.role
    return organization_role
def create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
    """Creates a new dispatch user.

    The new user is attached to the organization identified by the
    *organization* slug and to the default project, both with the member
    role, and the session is committed.
    """
    # pydantic forces a string password, but we really want bytes
    password = bytes(user_in.password, "utf-8")

    # create the user
    user = DispatchUser(
        **user_in.dict(exclude={"password", "organizations", "projects"}), password=password
    )

    org = organization_service.get_by_slug(db_session=db_session, slug=organization)

    # add the user to the default organization
    user.organizations.append(
        DispatchUserOrganization(organization=org, role=UserRoles.member.value)
    )

    # get the default project
    default_project = project_service.get_default(db_session=db_session)

    # add the user to the default project
    user.projects.append(DispatchUserProject(project=default_project, role=UserRoles.member.value))

    db_session.add(user)
    db_session.commit()
    return user
def get_or_create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
    """Gets an existing user or creates a new one."""
    try:
        return create(db_session=db_session, organization=organization, user_in=user_in)
    except IntegrityError:
        # The email already exists (unique constraint violation): roll back
        # the failed insert and return the existing user instead.
        db_session.rollback()
        return get_by_email(db_session=db_session, email=user_in.email)
def update(*, db_session, user: DispatchUser, user_in: UserUpdate) -> DispatchUser:
    """Updates a user from the fields explicitly set on *user_in* and
    commits the session."""
    user_data = jsonable_encoder(user)

    update_data = user_in.dict(exclude={"password"}, skip_defaults=True)
    for field in user_data:
        if field in update_data:
            setattr(user, field, update_data[field])

    if user_in.password:
        # BUG FIX: encode the password as utf-8 bytes, matching create()
        # above; the original passed the literal "<PASSWORD>" (an
        # anonymization artifact) as the encoding name, which raises
        # LookupError at runtime.
        user.password = bytes(user_in.password, "utf-8")

    if user_in.organizations:
        # NOTE(review): the built role objects are collected but never added
        # to the session or attached to the user here — confirm whether the
        # relationship cascade covers new DispatchUserOrganization rows.
        roles = []
        for role in user_in.organizations:
            roles.append(
                create_or_update_organization_role(db_session=db_session, user=user, role_in=role)
            )

    db_session.add(user)
    db_session.commit()
    return user
def get_current_user(request: Request) -> DispatchUser:
    """Attempts to get the current user depending on the configured authentication provider."""
    if DISPATCH_AUTHENTICATION_PROVIDER_SLUG:
        # Delegate identity resolution to the configured auth plugin.
        auth_plugin = plugins.get(DISPATCH_AUTHENTICATION_PROVIDER_SLUG)
        user_email = auth_plugin.get_current_user(request)
    else:
        log.debug("No authentication provider. Default user will be used")
        user_email = DISPATCH_AUTHENTICATION_DEFAULT_USER

    if not user_email:
        # Neither the provider nor the default yielded an email: reject.
        log.exception(
            f"Unable to determine user email based on configured auth provider or no default auth user email defined. Provider: {DISPATCH_AUTHENTICATION_PROVIDER_SLUG}"
        )
        raise InvalidCredentialException

    # Auto-provision: resolve (or create) the user within the request's
    # organization so downstream dependencies always get a DispatchUser.
    return get_or_create(
        db_session=request.state.db,
        organization=request.state.organization,
        user_in=UserRegister(email=user_email),
    )
def get_current_role(
request: Request, current_user: DispatchUser = Depends(get_current_user)
) -> UserRoles:
"""Attempts to get the current user depending on the configured authentication provider."""
return current_user.get_organization_role(organization_name=request.state.organization) | src/dispatch/auth/service.py | import logging
from typing import Optional
from fastapi import HTTPException, Depends
from fastapi.encoders import jsonable_encoder
from starlette.requests import Request
from starlette.status import HTTP_401_UNAUTHORIZED
from sqlalchemy.exc import IntegrityError
from dispatch.plugins.base import plugins
from dispatch.config import (
DISPATCH_AUTHENTICATION_PROVIDER_SLUG,
DISPATCH_AUTHENTICATION_DEFAULT_USER,
)
from dispatch.organization import service as organization_service
from dispatch.project import service as project_service
from dispatch.enums import UserRoles
from .models import (
DispatchUser,
DispatchUserOrganization,
DispatchUserProject,
UserOrganization,
UserProject,
UserRegister,
UserUpdate,
)
log = logging.getLogger(__name__)
InvalidCredentialException = HTTPException(
status_code=HTTP_401_UNAUTHORIZED, detail="Could not validate credentials"
)
def get(*, db_session, user_id: int) -> Optional[DispatchUser]:
"""Returns a user based on the given user id."""
return db_session.query(DispatchUser).filter(DispatchUser.id == user_id).one_or_none()
def get_by_email(*, db_session, email: str) -> Optional[DispatchUser]:
"""Returns a user object based on user email."""
return db_session.query(DispatchUser).filter(DispatchUser.email == email).one_or_none()
def create_or_update_project_role(*, db_session, user: DispatchUser, role_in: UserProject):
"""Creates a new project role or updates an existing role."""
if not role_in.project.id:
project = project_service.get_by_name(db_session=db_session, name=role_in.project.name)
project_id = project.id
else:
project_id = role_in.project.id
project_role = (
db_session.query(DispatchUserProject)
.filter(
DispatchUserProject.dispatch_user_id == user.id,
)
.filter(DispatchUserProject.project_id == project_id)
.one_or_none()
)
if not project_role:
return DispatchUserProject(
project_id=project_id,
role=role_in.role,
)
project_role.role = role_in.role
return project_role
def create_or_update_organization_role(
    *, db_session, user: DispatchUser, role_in: UserOrganization
):
    """Creates a new organization role or updates an existing role.

    Resolves the organization by name when the incoming payload carries no
    id, mirroring create_or_update_project_role above.
    """
    if not role_in.organization.id:
        organization = organization_service.get_by_name(
            db_session=db_session, name=role_in.organization.name
        )
        organization_id = organization.id
    else:
        organization_id = role_in.organization.id

    organization_role = (
        db_session.query(DispatchUserOrganization)
        .filter(
            DispatchUserOrganization.dispatch_user_id == user.id,
        )
        .filter(DispatchUserOrganization.organization_id == organization_id)
        .one_or_none()
    )

    if not organization_role:
        # BUG FIX: use the resolved organization_id. The local `organization`
        # variable is only bound in the lookup-by-name branch above, so the
        # original `organization_id=organization.id` raised NameError whenever
        # the payload already carried an id.
        return DispatchUserOrganization(
            organization_id=organization_id,
            role=role_in.role,
        )

    organization_role.role = role_in.role
    return organization_role
def create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
"""Creates a new dispatch user."""
# pydantic forces a string password, but we really want bytes
password = bytes(user_in.password, "utf-8")
# create the user
user = DispatchUser(
**user_in.dict(exclude={"password", "organizations", "projects"}), password=password
)
org = organization_service.get_by_slug(db_session=db_session, slug=organization)
# add the user to the default organization
user.organizations.append(
DispatchUserOrganization(organization=org, role=UserRoles.member.value)
)
# get the default project
default_project = project_service.get_default(db_session=db_session)
# add the user to the default project
user.projects.append(DispatchUserProject(project=default_project, role=UserRoles.member.value))
db_session.add(user)
db_session.commit()
return user
def get_or_create(*, db_session, organization: str, user_in: UserRegister) -> DispatchUser:
"""Gets an existing user or creates a new one."""
try:
return create(db_session=db_session, organization=organization, user_in=user_in)
except IntegrityError:
db_session.rollback()
return get_by_email(db_session=db_session, email=user_in.email)
def update(*, db_session, user: DispatchUser, user_in: UserUpdate) -> DispatchUser:
    """Updates a user from the fields explicitly set on *user_in* and
    commits the session."""
    user_data = jsonable_encoder(user)

    update_data = user_in.dict(exclude={"password"}, skip_defaults=True)
    for field in user_data:
        if field in update_data:
            setattr(user, field, update_data[field])

    if user_in.password:
        # BUG FIX: encode the password as utf-8 bytes, matching create()
        # above; the original passed the literal "<PASSWORD>" (an
        # anonymization artifact) as the encoding name, which raises
        # LookupError at runtime.
        user.password = bytes(user_in.password, "utf-8")

    if user_in.organizations:
        # NOTE(review): the built role objects are collected but never added
        # to the session or attached to the user here — confirm whether the
        # relationship cascade covers new DispatchUserOrganization rows.
        roles = []
        for role in user_in.organizations:
            roles.append(
                create_or_update_organization_role(db_session=db_session, user=user, role_in=role)
            )

    db_session.add(user)
    db_session.commit()
    return user
def get_current_user(request: Request) -> DispatchUser:
"""Attempts to get the current user depending on the configured authentication provider."""
if DISPATCH_AUTHENTICATION_PROVIDER_SLUG:
auth_plugin = plugins.get(DISPATCH_AUTHENTICATION_PROVIDER_SLUG)
user_email = auth_plugin.get_current_user(request)
else:
log.debug("No authentication provider. Default user will be used")
user_email = DISPATCH_AUTHENTICATION_DEFAULT_USER
if not user_email:
log.exception(
f"Unable to determine user email based on configured auth provider or no default auth user email defined. Provider: {DISPATCH_AUTHENTICATION_PROVIDER_SLUG}"
)
raise InvalidCredentialException
return get_or_create(
db_session=request.state.db,
organization=request.state.organization,
user_in=UserRegister(email=user_email),
)
def get_current_role(
    request: Request, current_user: DispatchUser = Depends(get_current_user)
) -> UserRoles:
    """Returns the current user's role within the request's organization."""
    return current_user.get_organization_role(organization_name=request.state.organization)
import copy
import datetime
import itertools
import json
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(
os.path.join(os.path.dirname(__file__), '../telescope')))
import iptranslation
import selector
import utils
class SelectorFileParserTest(unittest.TestCase):
def parse_file_contents(self, selector_file_contents):
parser = selector.SelectorFileParser()
return parser._parse_file_contents(selector_file_contents)
    def assertSelectorMatches(self, selector_expected, selector_actual):
        """Assert two Selector objects are field-for-field equal, including
        the nested ip_translation_spec strategy name and params."""
        self.assertEqual(selector_expected.start_time,
                         selector_actual.start_time)
        self.assertEqual(selector_expected.duration, selector_actual.duration)
        self.assertEqual(selector_expected.metric, selector_actual.metric)
        self.assertEqual(selector_expected.ip_translation_spec.strategy_name,
                         selector_actual.ip_translation_spec.strategy_name)
        self.assertDictEqual(selector_expected.ip_translation_spec.params,
                             selector_actual.ip_translation_spec.params)
        self.assertEqual(selector_expected.site, selector_actual.site)
        self.assertEqual(selector_expected.client_provider,
                         selector_actual.client_provider)
        self.assertEqual(selector_expected.client_country,
                         selector_actual.client_country)
    def assertParsedSelectorsMatch(self, selectors_expected,
                                   selector_file_contents):
        """Parse *selector_file_contents* and compare the result against the
        expected selectors, element by element."""
        selectors_actual = self.parse_file_contents(selector_file_contents)
        self.assertEqual(len(selectors_expected), len(selectors_actual))
        # The parser parses the subsets in reverse order, so we must compare
        # selectors in reverse.
        for i in reversed(range(len(selectors_expected))):
            self.assertSelectorMatches(selectors_expected[i],
                                       selectors_actual[i])
    def assertParsedSingleSelectorMatches(self, selector_expected,
                                          selector_file_contents):
        """Convenience wrapper: the file is expected to parse to exactly one
        selector equal to *selector_expected*."""
        self.assertParsedSelectorsMatch([selector_expected],
                                        selector_file_contents)
def testFailsParseForDeprecatedFileFormats(self):
selector_file_contents = """{
"file_format_version": 1,
"duration": "30d",
"metrics":"average_rtt",
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"subsets":[
{
"site":"lga02",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
}
]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testFailsParseForv1_1WithDeprecatedSubsetFunction(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics":"average_rtt",
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"subsets":[
{
"site":"lga02",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
},
{
"site":"lga01",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
}
]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testSuccessfulParseOfValidv1_1FileWithAllOptionalFieldsDefined(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga02"],
"client_providers": ["comcast"],
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.site = 'lga02'
selector_expected.client_provider = 'comcast'
selector_expected.client_country = 'us'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Complex(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["minimum_rtt", "download_throughput", "average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga01", "lga02"],
"client_providers": ["comcast", "verizon"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selectors_expected = []
selector_base = selector.Selector()
selector_base.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_base.duration = 30 * 24 * 60 * 60
selector_base.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
sites = ['lga01', 'lga02']
client_providers = ['comcast', 'verizon']
metrics = ['minimum_rtt', 'download_throughput', 'average_rtt']
for client_provider, site, metric in itertools.product(
client_providers, sites, metrics):
selector_copy = copy.copy(selector_base)
selector_copy.metric = metric
selector_copy.client_provider = client_provider
selector_copy.site = site
selectors_expected.append(selector_copy)
self.assertParsedSelectorsMatch(selectors_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_NoLocationValues(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_CountriesCaseInsensitivity(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"],
"client_countries": ["us", "Ca", "uK", "AU"]
}"""
selectors_expected = []
selector_base = selector.Selector()
selector_base.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_base.duration = 30 * 24 * 60 * 60
selector_base.metric = "average_rtt"
selector_base.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
for client_country in ('us', 'ca', 'uk', 'au'):
selector_copy = copy.copy(selector_base)
selector_copy.client_country = client_country
selectors_expected.append(selector_copy)
self.assertParsedSelectorsMatch(selectors_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlySites(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga02"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.site = 'lga02'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlyClientProviders(
self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_providers": ["comcast"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_provider = 'comcast'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlyClientCountries(
self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_country = 'us'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_EmptyListValue_OptionalParameter(self):
"""Empty list on optional parameter (client_countries) is handled as None"""
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": [],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_country = None
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_EmptyListValue_RequiredParameter(self):
"""An empty list for the required "metrics" field should raise an error."""
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": [],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testValidInput_v1dot1_NoOptionalValuesStillParses(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testFailsParseForInvalidJson(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2014-08-04"]
}
},
"sites": ["lga02"],
"client_providers": ["comcast"],
"start_times": ["2014-02-01T00:00:00Z"]
"""
# The final closing curly brace is missing, so this should fail
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
class MultiSelectorJsonEncoderTest(unittest.TestCase):
    def setUp(self):
        """Prepare the TestCase for comparisons of large JSON documents."""
        # Disable maxDiff, as diffing JSON can generate large diffs.
        self.maxDiff = None
    def assertJsonEqual(self, expected, actual):
        """Assert two JSON strings encode the same object (insensitive to
        key order and whitespace)."""
        self.assertDictEqual(json.loads(expected), json.loads(actual))
def testEncodeMultiSelectorOneElement(self):
s = selector.MultiSelector()
s.start_times = [datetime.datetime(2015, 4, 2, 10, 27, 34)]
s.duration = 45
s.sites = ['mia01']
s.client_providers = ['twc']
s.client_countries = ['us']
s.metrics = ['upload_throughput']
s.ip_translation_spec = (iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2015-02-05']}))
encoded_expected = """
{
"file_format_version": 1.1,
"duration": "45d",
"metrics": ["upload_throughput"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2015-02-05"]
}
},
"sites": ["mia01"],
"client_providers": ["twc"],
"client_countries": ["us"],
"start_times": ["2015-04-02T10:27:34Z"]
}"""
encoded_actual = selector.MultiSelectorJsonEncoder().encode(s)
self.assertJsonEqual(encoded_expected, encoded_actual)
def testEncodeMultiSelectorMultiElement(self):
s = selector.MultiSelector()
s.start_times = [
datetime.datetime(2015, 4, 1, 0, 0, 0),
datetime.datetime(2015, 4, 8, 0, 0, 0),
datetime.datetime(2015, 4, 15, 0, 0, 0),
]
s.duration = 7
s.sites = ['iad01', 'lga06', 'mia01', 'nuq03']
s.client_providers = ['comcast', 'twc', 'verizon']
s.metrics = ['download_throughput', 'upload_throughput', 'minimum_rtt']
s.ip_translation_spec = (iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2015-02-05']}))
encoded_expected = """
{
"file_format_version": 1.1,
"duration": "7d",
"metrics": ["download_throughput", "upload_throughput", "minimum_rtt"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2015-02-05"]
}
},
"sites": ["iad01", "lga06", "mia01", "nuq03"],
"client_providers": ["comcast", "twc", "verizon"],
"start_times": ["2015-04-01T00:00:00Z",
"2015-04-08T00:00:00Z",
"2015-04-15T00:00:00Z"]
}"""
encoded_actual = selector.MultiSelectorJsonEncoder().encode(s)
self.assertJsonEqual(encoded_expected, encoded_actual)
if __name__ == '__main__':
unittest.main() | tests/test_selector.py |
import copy
import datetime
import itertools
import json
import os
import sys
import unittest
sys.path.insert(1, os.path.abspath(
os.path.join(os.path.dirname(__file__), '../telescope')))
import iptranslation
import selector
import utils
class SelectorFileParserTest(unittest.TestCase):
def parse_file_contents(self, selector_file_contents):
parser = selector.SelectorFileParser()
return parser._parse_file_contents(selector_file_contents)
def assertSelectorMatches(self, selector_expected, selector_actual):
self.assertEqual(selector_expected.start_time,
selector_actual.start_time)
self.assertEqual(selector_expected.duration, selector_actual.duration)
self.assertEqual(selector_expected.metric, selector_actual.metric)
self.assertEqual(selector_expected.ip_translation_spec.strategy_name,
selector_actual.ip_translation_spec.strategy_name)
self.assertDictEqual(selector_expected.ip_translation_spec.params,
selector_actual.ip_translation_spec.params)
self.assertEqual(selector_expected.site, selector_actual.site)
self.assertEqual(selector_expected.client_provider,
selector_actual.client_provider)
self.assertEqual(selector_expected.client_country,
selector_actual.client_country)
def assertParsedSelectorsMatch(self, selectors_expected,
selector_file_contents):
selectors_actual = self.parse_file_contents(selector_file_contents)
self.assertEqual(len(selectors_expected), len(selectors_actual))
# The parser parses the subsets in reverse order, so we must compare
# selectors in reverse.
for i in reversed(range(len(selectors_expected))):
self.assertSelectorMatches(selectors_expected[i],
selectors_actual[i])
def assertParsedSingleSelectorMatches(self, selector_expected,
selector_file_contents):
self.assertParsedSelectorsMatch([selector_expected],
selector_file_contents)
def testFailsParseForDeprecatedFileFormats(self):
selector_file_contents = """{
"file_format_version": 1,
"duration": "30d",
"metrics":"average_rtt",
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"subsets":[
{
"site":"lga02",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
}
]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testFailsParseForv1_1WithDeprecatedSubsetFunction(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics":"average_rtt",
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"subsets":[
{
"site":"lga02",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
},
{
"site":"lga01",
"client_provider":"comcast",
"start_time":"2014-02-01T00:00:00Z"
}
]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testSuccessfulParseOfValidv1_1FileWithAllOptionalFieldsDefined(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga02"],
"client_providers": ["comcast"],
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.site = 'lga02'
selector_expected.client_provider = 'comcast'
selector_expected.client_country = 'us'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Complex(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["minimum_rtt", "download_throughput", "average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga01", "lga02"],
"client_providers": ["comcast", "verizon"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selectors_expected = []
selector_base = selector.Selector()
selector_base.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_base.duration = 30 * 24 * 60 * 60
selector_base.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
sites = ['lga01', 'lga02']
client_providers = ['comcast', 'verizon']
metrics = ['minimum_rtt', 'download_throughput', 'average_rtt']
for client_provider, site, metric in itertools.product(
client_providers, sites, metrics):
selector_copy = copy.copy(selector_base)
selector_copy.metric = metric
selector_copy.client_provider = client_provider
selector_copy.site = site
selectors_expected.append(selector_copy)
self.assertParsedSelectorsMatch(selectors_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_NoLocationValues(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_CountriesCaseInsensitivity(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"],
"client_countries": ["us", "Ca", "uK", "AU"]
}"""
selectors_expected = []
selector_base = selector.Selector()
selector_base.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_base.duration = 30 * 24 * 60 * 60
selector_base.metric = "average_rtt"
selector_base.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
for client_country in ('us', 'ca', 'uk', 'au'):
selector_copy = copy.copy(selector_base)
selector_copy.client_country = client_country
selectors_expected.append(selector_copy)
self.assertParsedSelectorsMatch(selectors_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlySites(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"sites": ["lga02"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.site = 'lga02'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlyClientProviders(
self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_providers": ["comcast"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_provider = 'comcast'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_Simple_SingleLocationValues_onlyClientCountries(
self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_country = 'us'
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_EmptyListValue_OptionalParameter(self):
"""Empty list on optional parameter (client_countries) is handled as None"""
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": [],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
selector_expected.client_country = None
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testValidInput_v1dot1_EmptyListValue_RequiredParameter(self):
"""An empty list for the required "metrics" field should raise an error."""
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": [],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"client_countries": ["us"],
"start_times": ["2014-02-01T00:00:00Z"]
}"""
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
def testValidInput_v1dot1_NoOptionalValuesStillParses(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation":{
"strategy":"maxmind",
"params":{
"db_snapshots":["2014-08-04"]
}
},
"start_times": ["2014-02-01T00:00:00Z"]
}"""
selector_expected = selector.Selector()
selector_expected.start_time = utils.make_datetime_utc_aware(
datetime.datetime(2014, 2, 1))
selector_expected.duration = 30 * 24 * 60 * 60
selector_expected.metric = 'average_rtt'
selector_expected.ip_translation_spec = (
iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2014-08-04']}))
self.assertParsedSingleSelectorMatches(selector_expected,
selector_file_contents)
def testFailsParseForInvalidJson(self):
selector_file_contents = """{
"file_format_version": 1.1,
"duration": "30d",
"metrics": ["average_rtt"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2014-08-04"]
}
},
"sites": ["lga02"],
"client_providers": ["comcast"],
"start_times": ["2014-02-01T00:00:00Z"]
"""
# The final closing curly brace is missing, so this should fail
self.assertRaises(selector.SelectorParseError, self.parse_file_contents,
selector_file_contents)
class MultiSelectorJsonEncoderTest(unittest.TestCase):
def setUp(self):
# Disable maxDiff, as diffing JSON can generate large diffs.
self.maxDiff = None
def assertJsonEqual(self, expected, actual):
self.assertDictEqual(json.loads(expected), json.loads(actual))
def testEncodeMultiSelectorOneElement(self):
s = selector.MultiSelector()
s.start_times = [datetime.datetime(2015, 4, 2, 10, 27, 34)]
s.duration = 45
s.sites = ['mia01']
s.client_providers = ['twc']
s.client_countries = ['us']
s.metrics = ['upload_throughput']
s.ip_translation_spec = (iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2015-02-05']}))
encoded_expected = """
{
"file_format_version": 1.1,
"duration": "45d",
"metrics": ["upload_throughput"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2015-02-05"]
}
},
"sites": ["mia01"],
"client_providers": ["twc"],
"client_countries": ["us"],
"start_times": ["2015-04-02T10:27:34Z"]
}"""
encoded_actual = selector.MultiSelectorJsonEncoder().encode(s)
self.assertJsonEqual(encoded_expected, encoded_actual)
def testEncodeMultiSelectorMultiElement(self):
s = selector.MultiSelector()
s.start_times = [
datetime.datetime(2015, 4, 1, 0, 0, 0),
datetime.datetime(2015, 4, 8, 0, 0, 0),
datetime.datetime(2015, 4, 15, 0, 0, 0),
]
s.duration = 7
s.sites = ['iad01', 'lga06', 'mia01', 'nuq03']
s.client_providers = ['comcast', 'twc', 'verizon']
s.metrics = ['download_throughput', 'upload_throughput', 'minimum_rtt']
s.ip_translation_spec = (iptranslation.IPTranslationStrategySpec(
'maxmind', {'db_snapshots': ['2015-02-05']}))
encoded_expected = """
{
"file_format_version": 1.1,
"duration": "7d",
"metrics": ["download_throughput", "upload_throughput", "minimum_rtt"],
"ip_translation": {
"strategy": "maxmind",
"params": {
"db_snapshots": ["2015-02-05"]
}
},
"sites": ["iad01", "lga06", "mia01", "nuq03"],
"client_providers": ["comcast", "twc", "verizon"],
"start_times": ["2015-04-01T00:00:00Z",
"2015-04-08T00:00:00Z",
"2015-04-15T00:00:00Z"]
}"""
encoded_actual = selector.MultiSelectorJsonEncoder().encode(s)
self.assertJsonEqual(encoded_expected, encoded_actual)
if __name__ == '__main__':
unittest.main() | 0.424889 | 0.344761 |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__all__ = ('EnumOption',)
import string
import SCons.Errors
def _validator(key, val, env, vals):
if not val in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s' % (key, val))
def EnumOption(key, help, default, allowed_values, map={}, ignorecase=0):
"""
The input parameters describe a option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Options.Add().
'key' and 'default' are the values to be passed on to Options.Add().
'help' will be appended by the allowed values automatically
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, string.join(allowed_values, '|'))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, string.lower(val), env, vals)
else:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, val, env, vals)
# define converter
if ignorecase == 2:
converter = lambda val, map=map: \
string.lower(map.get(string.lower(val), val))
elif ignorecase == 1:
converter = lambda val, map=map: \
map.get(string.lower(val), val)
else:
converter = lambda val, map=map: \
map.get(val, val)
return (key, help, default, validator, converter) | src/engine/SCons/Options/EnumOption.py |
__revision__ = "__FILE__ __REVISION__ __DATE__ __DEVELOPER__"
__all__ = ('EnumOption',)
import string
import SCons.Errors
def _validator(key, val, env, vals):
if not val in vals:
raise SCons.Errors.UserError(
'Invalid value for option %s: %s' % (key, val))
def EnumOption(key, help, default, allowed_values, map={}, ignorecase=0):
"""
The input parameters describe a option with only certain values
allowed. They are returned with an appropriate converter and
validator appended. The result is usable for input to
Options.Add().
'key' and 'default' are the values to be passed on to Options.Add().
'help' will be appended by the allowed values automatically
'allowed_values' is a list of strings, which are allowed as values
for this option.
The 'map'-dictionary may be used for converting the input value
into canonical values (eg. for aliases).
'ignorecase' defines the behaviour of the validator:
If ignorecase == 0, the validator/converter are case-sensitive.
If ignorecase == 1, the validator/converter are case-insensitive.
If ignorecase == 2, the validator/converter is case-insensitive and
the converted value will always be lower-case.
The 'validator' tests whether the value is in the list of allowed
values. The 'converter' converts input values according to the
given 'map'-dictionary (unmapped input values are returned
unchanged).
"""
help = '%s (%s)' % (help, string.join(allowed_values, '|'))
# define validator
if ignorecase >= 1:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, string.lower(val), env, vals)
else:
validator = lambda key, val, env, vals=allowed_values: \
_validator(key, val, env, vals)
# define converter
if ignorecase == 2:
converter = lambda val, map=map: \
string.lower(map.get(string.lower(val), val))
elif ignorecase == 1:
converter = lambda val, map=map: \
map.get(string.lower(val), val)
else:
converter = lambda val, map=map: \
map.get(val, val)
return (key, help, default, validator, converter) | 0.533641 | 0.247192 |
# # elif a[l]==key and q>l:
# # l=q
# elif a[q]>key:
# r=q
# else:
# p=q
# q1=q
# q2=q
# while p<q1:
# q=(p+q1)//2
# if a[q]==key:
# f=q
# elif a[q]>key:
# q1=q
# else:
# p=q
# while q2<r:
# q=(r+q2)//2
# if a[q]==key:
# l=q
# elif a[q]>key:
# r=q
# else:
# q2=q
# return [f,l]
# def searchRange(self, nums: List[int], target: int) -> List[int]:
# a=nums
# key=target
# print('aaya')
# p=0
# r=len(a)-1
# q=(p+r)//2
# f=0
# l=len(a)-1
# while p<r :
# q=(p+r)//2
# if a[q]==key:
# f,l=q,q
# break
# # if a[f]==key:
# # if q<f:
# # f=q
# # #sth p,r
# # elif a[l]==key and q>l:
# # l=q
# elif a[q]>key:
# r=q
# else:
# p=q
# q1=q
# q2=q
# while p<q1:
# q=(p+q1)//2
# if a[q]==key:
# f=q
# elif a[q]>key:
# q1=q
# else:
# p=q
# while q2<r:
# q=(r+q2)//2
# if a[q]==key:
# l=q
# elif a[q]>key:
# r=q
# else:
# q2=q
# return [f,l]
def searchRange(nums, target):
a=nums
key=target
p=0
r=len(a)-1
q=(p+r)//2
f=0
l=len(a)-1
while p<r :
q=(p+r)//2
if a[q]==key:
f,l=q,q
break
# if a[f]==key:
# if q<f:
# f=q
# #sth p,r
# elif a[l]==key and q>l:
# l=q
elif a[q]>key:
r=q
else:
p=q
print(p,q,r)
q1=q
q2=q
import math
for i in range(int(math.log(len(a),2))+1):
q=(p+q1)//2
if a[q]==key:
f=q
q1=q
print(p,q,q1)
elif a[q]>key:
q1=q
else:
p=q
for i in range(int(math.log(len(a),2))+1):
q=(r+q2)//2
if a[q]==key:
l=q
q2=l
elif a[q]>key:
r=q
else:
q2=q
print(q2,q,r)
return [f,l]
nums = [1,2,2,3,3,3,4,4,4,4,5,5,5,5,8,8,8,8,8,8,8]
target = 8
print(searchRange(nums, target)) | python/searching/var_binary_search.py |
# # elif a[l]==key and q>l:
# # l=q
# elif a[q]>key:
# r=q
# else:
# p=q
# q1=q
# q2=q
# while p<q1:
# q=(p+q1)//2
# if a[q]==key:
# f=q
# elif a[q]>key:
# q1=q
# else:
# p=q
# while q2<r:
# q=(r+q2)//2
# if a[q]==key:
# l=q
# elif a[q]>key:
# r=q
# else:
# q2=q
# return [f,l]
# def searchRange(self, nums: List[int], target: int) -> List[int]:
# a=nums
# key=target
# print('aaya')
# p=0
# r=len(a)-1
# q=(p+r)//2
# f=0
# l=len(a)-1
# while p<r :
# q=(p+r)//2
# if a[q]==key:
# f,l=q,q
# break
# # if a[f]==key:
# # if q<f:
# # f=q
# # #sth p,r
# # elif a[l]==key and q>l:
# # l=q
# elif a[q]>key:
# r=q
# else:
# p=q
# q1=q
# q2=q
# while p<q1:
# q=(p+q1)//2
# if a[q]==key:
# f=q
# elif a[q]>key:
# q1=q
# else:
# p=q
# while q2<r:
# q=(r+q2)//2
# if a[q]==key:
# l=q
# elif a[q]>key:
# r=q
# else:
# q2=q
# return [f,l]
def searchRange(nums, target):
a=nums
key=target
p=0
r=len(a)-1
q=(p+r)//2
f=0
l=len(a)-1
while p<r :
q=(p+r)//2
if a[q]==key:
f,l=q,q
break
# if a[f]==key:
# if q<f:
# f=q
# #sth p,r
# elif a[l]==key and q>l:
# l=q
elif a[q]>key:
r=q
else:
p=q
print(p,q,r)
q1=q
q2=q
import math
for i in range(int(math.log(len(a),2))+1):
q=(p+q1)//2
if a[q]==key:
f=q
q1=q
print(p,q,q1)
elif a[q]>key:
q1=q
else:
p=q
for i in range(int(math.log(len(a),2))+1):
q=(r+q2)//2
if a[q]==key:
l=q
q2=l
elif a[q]>key:
r=q
else:
q2=q
print(q2,q,r)
return [f,l]
nums = [1,2,2,3,3,3,4,4,4,4,5,5,5,5,8,8,8,8,8,8,8]
target = 8
print(searchRange(nums, target)) | 0.042652 | 0.157137 |
from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import F
from django.urls import reverse
from ..cooggerapp.models.common import Common, View, Vote
from ..cooggerapp.models.utils import get_first_image
class AbstractThreadedComments(models.Model):
reply_count = models.PositiveIntegerField(default=0)
created = models.DateTimeField(auto_now_add=True, verbose_name="Created")
last_update = models.DateTimeField(auto_now_add=True, verbose_name="Last update")
class Meta:
abstract = True
class AllProperties(models.Model):
class Meta:
abstract = True
@property
def get_parent(self):
if self.reply is None:
return self
return self.__class__.objects.get(id=self.reply_id)
@property
def parent_user(self):
return self.get_parent.user
@property
def parent_permlink(self):
return self.get_parent.permlink
def is_threaded_comments(self, obj=None):
if obj is None:
return self.reply is not None
return obj.reply is not None
@property
def get_top_obj(self):
if not self.is_threaded_comments():
return self.content_type.model_class().objects.get(id=self.object_id)
for obj in self.get_all_reply_obj():
if not self.is_threaded_comments(obj[0]):
first_reply = obj[0]
return first_reply.content_type.model_class().objects.get(
id=first_reply.object_id
)
class ThreadedComments(AbstractThreadedComments, AllProperties, Common, Vote, View):
user = models.ForeignKey(User, on_delete=models.CASCADE)
permlink = models.PositiveIntegerField(default=99999999999999)
body = models.TextField()
image_address = models.URLField(null=True, blank=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
reply = models.ForeignKey(
"self", on_delete=models.CASCADE, null=True, blank=True, related_name="children"
)
depth = models.PositiveIntegerField(default=0)
to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="to")
class Meta:
ordering = ["-created"]
unique_together = [["user", "permlink"]]
def __str__(self):
return f"/@{self.user}/{self.permlink}"
@property
def get_absolute_url(self):
return reverse(
"reply-detail", kwargs=dict(username=str(self.user), permlink=self.permlink)
)
@property
def is_exists(self):
return self.__class__.objects.filter(
user=self.user, permlink=self.permlink
).exists()
def generate_permlink(self):
if not self.is_exists:
queryset = self.__class__.objects.filter(user=self.user)
if not queryset.exists():
return 1
else:
return queryset.first().permlink + 1
return self.permlink
def save(self, *args, **kwargs):
if self.is_threaded_comments() and not self.is_exists:
"It is not working when update"
for obj in self.get_all_reply_obj():
obj.update(reply_count=(F("reply_count") + 1))
self.image_address = get_first_image(self.body)
self.permlink = self.generate_permlink()
self.to = self.get_to()
self.depth = self.get_parent_count()
super().save(*args, **kwargs)
def get_all_reply_obj(self):
reply_id = self.reply_id
while True:
query = self.__class__.objects.filter(id=reply_id) # get parent
if query.exists():
yield query
if self.is_threaded_comments(query[0]):
reply_id = query[0].reply_id
else:
break
else:
break
def get_parent_count(self):
reply_id = self.reply_id
parent_count = 0
while reply_id:
try:
parent = ThreadedComments.objects.get(id=reply_id)
except IndexError:
break
else:
if parent:
parent_count += 1
if parent.is_threaded_comments():
reply_id = parent.reply_id
else:
break
else:
break
return parent_count
@property
def is_reply(self):
return True
def get_to(self):
model_name = self.content_type.model
app_label = self.content_type.app_label
model = ContentType.objects.get(app_label=app_label, model=model_name)
return model.get_object_for_this_type(id=self.object_id).user | core/threaded_comment/models.py | from django.contrib.auth.models import User
from django.contrib.contenttypes.fields import GenericForeignKey
from django.contrib.contenttypes.models import ContentType
from django.db import models
from django.db.models import F
from django.urls import reverse
from ..cooggerapp.models.common import Common, View, Vote
from ..cooggerapp.models.utils import get_first_image
class AbstractThreadedComments(models.Model):
reply_count = models.PositiveIntegerField(default=0)
created = models.DateTimeField(auto_now_add=True, verbose_name="Created")
last_update = models.DateTimeField(auto_now_add=True, verbose_name="Last update")
class Meta:
abstract = True
class AllProperties(models.Model):
class Meta:
abstract = True
@property
def get_parent(self):
if self.reply is None:
return self
return self.__class__.objects.get(id=self.reply_id)
@property
def parent_user(self):
return self.get_parent.user
@property
def parent_permlink(self):
return self.get_parent.permlink
def is_threaded_comments(self, obj=None):
if obj is None:
return self.reply is not None
return obj.reply is not None
@property
def get_top_obj(self):
if not self.is_threaded_comments():
return self.content_type.model_class().objects.get(id=self.object_id)
for obj in self.get_all_reply_obj():
if not self.is_threaded_comments(obj[0]):
first_reply = obj[0]
return first_reply.content_type.model_class().objects.get(
id=first_reply.object_id
)
class ThreadedComments(AbstractThreadedComments, AllProperties, Common, Vote, View):
user = models.ForeignKey(User, on_delete=models.CASCADE)
permlink = models.PositiveIntegerField(default=99999999999999)
body = models.TextField()
image_address = models.URLField(null=True, blank=True)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
reply = models.ForeignKey(
"self", on_delete=models.CASCADE, null=True, blank=True, related_name="children"
)
depth = models.PositiveIntegerField(default=0)
to = models.ForeignKey(User, on_delete=models.CASCADE, related_name="to")
class Meta:
ordering = ["-created"]
unique_together = [["user", "permlink"]]
def __str__(self):
return f"/@{self.user}/{self.permlink}"
@property
def get_absolute_url(self):
return reverse(
"reply-detail", kwargs=dict(username=str(self.user), permlink=self.permlink)
)
@property
def is_exists(self):
return self.__class__.objects.filter(
user=self.user, permlink=self.permlink
).exists()
def generate_permlink(self):
if not self.is_exists:
queryset = self.__class__.objects.filter(user=self.user)
if not queryset.exists():
return 1
else:
return queryset.first().permlink + 1
return self.permlink
def save(self, *args, **kwargs):
if self.is_threaded_comments() and not self.is_exists:
"It is not working when update"
for obj in self.get_all_reply_obj():
obj.update(reply_count=(F("reply_count") + 1))
self.image_address = get_first_image(self.body)
self.permlink = self.generate_permlink()
self.to = self.get_to()
self.depth = self.get_parent_count()
super().save(*args, **kwargs)
def get_all_reply_obj(self):
reply_id = self.reply_id
while True:
query = self.__class__.objects.filter(id=reply_id) # get parent
if query.exists():
yield query
if self.is_threaded_comments(query[0]):
reply_id = query[0].reply_id
else:
break
else:
break
def get_parent_count(self):
reply_id = self.reply_id
parent_count = 0
while reply_id:
try:
parent = ThreadedComments.objects.get(id=reply_id)
except IndexError:
break
else:
if parent:
parent_count += 1
if parent.is_threaded_comments():
reply_id = parent.reply_id
else:
break
else:
break
return parent_count
@property
def is_reply(self):
return True
def get_to(self):
model_name = self.content_type.model
app_label = self.content_type.app_label
model = ContentType.objects.get(app_label=app_label, model=model_name)
return model.get_object_for_this_type(id=self.object_id).user | 0.606265 | 0.09556 |
import numpy as np
import matplotlib.pyplot as plt
from deepblast.dataset.utils import states2alignment, tmstate_f, states2edges
def roc_edges(true_edges, pred_edges):
truth = set(true_edges)
pred = set(pred_edges)
tp = len(truth & pred)
fp = len(pred - truth)
fn = len(truth - pred)
perc_id = tp / len(true_edges)
ppv = tp / (tp + fp)
fnr = fn / (fn + tp)
fdr = fp / (fp + tp)
return tp, fp, fn, perc_id, ppv, fnr, fdr
def roc_edges_kernel_identity(true_edges, pred_edges, kernel_width):
pe_ = pred_edges
pe = np.array(pred_edges)
for k in range(kernel_width):
pred_edges_k_pos = pe + k
pred_edges_k_neg = pe - k
pe_ += list(map(tuple, pred_edges_k_pos))
pe_ += list(map(tuple, pred_edges_k_neg))
truth = set(true_edges)
pred = set(pe_)
tp = len(truth & pred)
perc_id = tp / len(true_edges)
return perc_id
def alignment_score_kernel(true_states: str, pred_states: str,
kernel_widths: list,
query_offset: int = 0, hit_offset: int = 0):
"""
Computes ROC statistics on alignment
Parameters
----------
true_states : str
Ground truth state string
pred_states : str
Predicted state string
"""
pred_states = list(map(tmstate_f, pred_states))
true_states = list(map(tmstate_f, true_states))
pred_edges = states2edges(pred_states)
true_edges = states2edges(true_states)
# add offset to account for local alignments
true_edges = list(map(tuple, np.array(true_edges)))
pred_edges = np.array(pred_edges)
pred_edges[:, 0] += query_offset
pred_edges[:, 1] += hit_offset
pred_edges = list(map(tuple, pred_edges))
res = []
for k in kernel_widths:
r = roc_edges_kernel_identity(true_edges, pred_edges, k)
res.append(r)
return res
def alignment_score(true_states: str, pred_states: str):
"""
Computes ROC statistics on alignment
Parameters
----------
true_states : str
Ground truth state string
pred_states : str
Predicted state string
"""
pred_states = list(map(tmstate_f, pred_states))
true_states = list(map(tmstate_f, true_states))
pred_edges = states2edges(pred_states)
true_edges = states2edges(true_states)
stats = roc_edges(true_edges, pred_edges)
return stats
def alignment_visualization(truth, pred, match, gap, xlen, ylen):
""" Visualize alignment matrix
Parameters
----------
truth : torch.Tensor
Ground truth alignment
pred : torch.Tensor
Predicted alignment
match : torch.Tensor
Match matrix
gap : torch.Tensor
Gap matrix
xlen : int
Length of protein x
ylen : int
Length of protein y
Returns
-------
fig: matplotlib.pyplot.Figure
Matplotlib figure
ax : list of matplotlib.pyplot.Axes
Matplotlib axes objects
"""
fig, ax = plt.subplots(1, 4, figsize=(12, 3))
ax[0].imshow(truth[:xlen, :ylen], aspect='auto')
ax[0].set_xlabel('Positions')
ax[0].set_ylabel('Positions')
ax[0].set_title('Ground truth alignment')
im1 = ax[1].imshow(pred[:xlen, :ylen], aspect='auto')
ax[1].set_xlabel('Positions')
ax[1].set_title('Predicted alignment')
fig.colorbar(im1, ax=ax[1])
im2 = ax[2].imshow(match[:xlen, :ylen], aspect='auto')
ax[2].set_xlabel('Positions')
ax[2].set_title('Match scoring matrix')
fig.colorbar(im2, ax=ax[2])
im3 = ax[3].imshow(gap[:xlen, :ylen], aspect='auto')
ax[3].set_xlabel('Positions')
ax[3].set_title('Gap scoring matrix')
fig.colorbar(im3, ax=ax[3])
plt.tight_layout()
return fig, ax
def alignment_text(x, y, pred, truth, stats):
""" Used to visualize alignment as text
Parameters
----------
x : str
Protein X
y : str
Protein Y
pred : list of int
Predicted states
truth : list of int
Ground truth states
stats : list of float
List of statistics from roc_edges
"""
# TODO: we got the truth and prediction edges swapped somewhere earlier
true_alignment = states2alignment(truth, x, y)
pred_alignment = states2alignment(pred, x, y)
cols = ['tp', 'fp', 'fn', 'perc_id', 'ppv', 'fnr', 'fdr']
stats = list(map(lambda x: np.round(x, 2), stats))
s = list(map(lambda x: f'{x[0]}: {x[1]}', list(zip(cols, stats))))
stats_viz = ' '.join(s)
truth_viz = (
'# Ground truth\n'
f' {true_alignment[0]}\n {true_alignment[1]}'
)
pred_viz = (
'# Prediction\n'
f' {pred_alignment[0]}\n {pred_alignment[1]}'
)
s = stats_viz + '\n' + truth_viz + '\n' + pred_viz
return s | deepblast/score.py | import numpy as np
import matplotlib.pyplot as plt
from deepblast.dataset.utils import states2alignment, tmstate_f, states2edges
def roc_edges(true_edges, pred_edges):
truth = set(true_edges)
pred = set(pred_edges)
tp = len(truth & pred)
fp = len(pred - truth)
fn = len(truth - pred)
perc_id = tp / len(true_edges)
ppv = tp / (tp + fp)
fnr = fn / (fn + tp)
fdr = fp / (fp + tp)
return tp, fp, fn, perc_id, ppv, fnr, fdr
def roc_edges_kernel_identity(true_edges, pred_edges, kernel_width):
pe_ = pred_edges
pe = np.array(pred_edges)
for k in range(kernel_width):
pred_edges_k_pos = pe + k
pred_edges_k_neg = pe - k
pe_ += list(map(tuple, pred_edges_k_pos))
pe_ += list(map(tuple, pred_edges_k_neg))
truth = set(true_edges)
pred = set(pe_)
tp = len(truth & pred)
perc_id = tp / len(true_edges)
return perc_id
def alignment_score_kernel(true_states: str, pred_states: str,
kernel_widths: list,
query_offset: int = 0, hit_offset: int = 0):
"""
Computes ROC statistics on alignment
Parameters
----------
true_states : str
Ground truth state string
pred_states : str
Predicted state string
"""
pred_states = list(map(tmstate_f, pred_states))
true_states = list(map(tmstate_f, true_states))
pred_edges = states2edges(pred_states)
true_edges = states2edges(true_states)
# add offset to account for local alignments
true_edges = list(map(tuple, np.array(true_edges)))
pred_edges = np.array(pred_edges)
pred_edges[:, 0] += query_offset
pred_edges[:, 1] += hit_offset
pred_edges = list(map(tuple, pred_edges))
res = []
for k in kernel_widths:
r = roc_edges_kernel_identity(true_edges, pred_edges, k)
res.append(r)
return res
def alignment_score(true_states: str, pred_states: str):
"""
Computes ROC statistics on alignment
Parameters
----------
true_states : str
Ground truth state string
pred_states : str
Predicted state string
"""
pred_states = list(map(tmstate_f, pred_states))
true_states = list(map(tmstate_f, true_states))
pred_edges = states2edges(pred_states)
true_edges = states2edges(true_states)
stats = roc_edges(true_edges, pred_edges)
return stats
def alignment_visualization(truth, pred, match, gap, xlen, ylen):
""" Visualize alignment matrix
Parameters
----------
truth : torch.Tensor
Ground truth alignment
pred : torch.Tensor
Predicted alignment
match : torch.Tensor
Match matrix
gap : torch.Tensor
Gap matrix
xlen : int
Length of protein x
ylen : int
Length of protein y
Returns
-------
fig: matplotlib.pyplot.Figure
Matplotlib figure
ax : list of matplotlib.pyplot.Axes
Matplotlib axes objects
"""
fig, ax = plt.subplots(1, 4, figsize=(12, 3))
ax[0].imshow(truth[:xlen, :ylen], aspect='auto')
ax[0].set_xlabel('Positions')
ax[0].set_ylabel('Positions')
ax[0].set_title('Ground truth alignment')
im1 = ax[1].imshow(pred[:xlen, :ylen], aspect='auto')
ax[1].set_xlabel('Positions')
ax[1].set_title('Predicted alignment')
fig.colorbar(im1, ax=ax[1])
im2 = ax[2].imshow(match[:xlen, :ylen], aspect='auto')
ax[2].set_xlabel('Positions')
ax[2].set_title('Match scoring matrix')
fig.colorbar(im2, ax=ax[2])
im3 = ax[3].imshow(gap[:xlen, :ylen], aspect='auto')
ax[3].set_xlabel('Positions')
ax[3].set_title('Gap scoring matrix')
fig.colorbar(im3, ax=ax[3])
plt.tight_layout()
return fig, ax
def alignment_text(x, y, pred, truth, stats):
""" Used to visualize alignment as text
Parameters
----------
x : str
Protein X
y : str
Protein Y
pred : list of int
Predicted states
truth : list of int
Ground truth states
stats : list of float
List of statistics from roc_edges
"""
# TODO: we got the truth and prediction edges swapped somewhere earlier
true_alignment = states2alignment(truth, x, y)
pred_alignment = states2alignment(pred, x, y)
cols = ['tp', 'fp', 'fn', 'perc_id', 'ppv', 'fnr', 'fdr']
stats = list(map(lambda x: np.round(x, 2), stats))
s = list(map(lambda x: f'{x[0]}: {x[1]}', list(zip(cols, stats))))
stats_viz = ' '.join(s)
truth_viz = (
'# Ground truth\n'
f' {true_alignment[0]}\n {true_alignment[1]}'
)
pred_viz = (
'# Prediction\n'
f' {pred_alignment[0]}\n {pred_alignment[1]}'
)
s = stats_viz + '\n' + truth_viz + '\n' + pred_viz
return s | 0.718002 | 0.684462 |
from typing import NamedTuple, Dict, Callable, Union
import os
import json
import wandb
import torch
import random
import shutil
import tarfile
import tempfile
import numpy as np
from pathlib import Path
from loguru import logger
from copy import deepcopy
from functools import wraps
import torch.distributed as dist
import vae_lm.training.ddp as ddp
from contextlib import contextmanager
from torch_nlp_utils.common import Params
from vae_lm.utils.base import run_on_rank_zero
# Modules
from vae_lm.models.base import VAELmModel
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "weights.pt"
METRICS_NAME = "metrics.json"
class TorchBatchError(Exception):
"""
This exception is raised during any batch processing.
It has an attribute `batch` to get a tensor that raised an error.
"""
def __init__(self, message: str, batch: torch.Tensor):
self.message = message
self.batch = batch
def __str__(self):
return repr(self.message)
class Archive(NamedTuple):
"""An archive comprises a Model and its experimental config with metrics."""
model: VAELmModel
config: Params
metrics: Dict[str, float]
def archive_model(
serialization_dir: Path,
weights: Path,
archive_path: Path = None,
) -> None:
"""
Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`.
Parameters
----------
serialization_dir : `Path`, required
The directory where the weights and vocabulary are written out.
weights : `Path`, required
Which weights file to include in the archive. The default is `best.th`.
archive_path : `str`, optional, (default = `None`)
A full path to serialize the model to. The default is "model.tar.gz" inside the
serialization_dir. If you pass a directory here, we'll serialize the model
to "model.tar.gz" inside the directory.
"""
# Check weights
weights_file = weights / "model.pt"
if not weights_file.exists():
logger.error(f"weights file {weights_file} does not exist, unable to archive model.")
return
# Check metrics
metrics_file = weights / METRICS_NAME
if not metrics_file.exists():
logger.error(f"metrics file {metrics_file} does not exist, unable to archive model.")
return
# Check config
config_file = serialization_dir / CONFIG_NAME
if not config_file.exists():
logger.error(f"config file {config_file} does not exist, unable to archive model.")
# Check archive path
if archive_path is not None:
archive_file = archive_path
if archive_file.is_dir():
archive_file = archive_file / "model.tar.gz"
else:
archive_file = serialization_dir / "model.tar.gz"
logger.info(f"Archiving data to {archive_file}.")
with tarfile.open(archive_file, "w:gz") as archive:
archive.add(config_file, arcname=CONFIG_NAME)
archive.add(weights_file, arcname=WEIGHTS_NAME)
archive.add(metrics_file, arcname=METRICS_NAME)
archive.add(str(serialization_dir / "vocabulary"), arcname="vocabulary")
def load_archive(
archive_file: Path,
cuda_device: int = -1,
) -> Archive:
"""
Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file : `Path`, required
The archive file to load the model from.
cuda_device : `int`, optional (default = `-1`)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
"""
logger.info(f"Loading archive file {archive_file}")
tempdir = None
try:
if archive_file.is_dir():
serialization_dir = archive_file
else:
with extracted_archive(archive_file, cleanup=False) as tempdir:
serialization_dir = Path(tempdir)
weights_path = serialization_dir / WEIGHTS_NAME
# Load config
config = Params.from_file(str(serialization_dir / CONFIG_NAME))
# Load metrics
with (serialization_dir / METRICS_NAME).open("r", encoding="utf-8") as file:
metrics = json.load(file)
# Instantiate model. Use a duplicate of the config, as it will get consumed.
model_params = config.duplicate()
model_params["vocabulary"] = str(serialization_dir / "vocabulary")
model = VAELmModel.load(
model_params,
weights=weights_path,
device=cuda_device,
)
finally:
if tempdir is not None:
logger.info(f"Removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
return Archive(
model=model,
config=config,
metrics=metrics,
)
@contextmanager
def extracted_archive(resolved_archive_file, cleanup=True):
tempdir = None
try:
tempdir = tempfile.mkdtemp()
logger.info(f"Extracting archive file {resolved_archive_file} to temp dir {tempdir}")
with tarfile.open(resolved_archive_file, "r:gz") as archive:
archive.extractall(tempdir)
yield tempdir
finally:
if tempdir is not None and cleanup:
logger.info(f"Removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
def seed_everything(seed: int) -> None:
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def configure_world(func: Callable) -> Callable:
"""Decorator to configure Distributed Training world and wandb if needed for function."""
@wraps(func)
def wrapper(process_rank: int, config: Params, world_size: int = 1, **kwargs) -> None:
# Set info related to process rank
is_master = process_rank == 0
os.environ["LOCAL_RANK"] = str(process_rank)
use_wandb = config.pop("use_wandb", False)
serialization_dir = Path(config["serialization_dir"])
# Setup world for Distributed Training
if world_size > 1:
ddp.setup_world(process_rank, world_size, backend=dist.Backend.NCCL)
# Run wandb in master process
# TODO: Think about config unflat for wandb sweep to work for hyperparameters optimization.
if is_master and use_wandb:
logger.use_wandb = True
wandb.init(
project=os.getenv("WANDB_PROJECT_NAME"),
config=config.as_flat_dict(),
reinit=True,
tags=config.pop("tags"),
)
# Run function
try:
result = func(process_rank=process_rank, config=config, world_size=world_size, **kwargs)
except Exception as error:
# If it is a TorchBatchError then save it for convenience
if isinstance(error, TorchBatchError):
logger.bind(
batch=error.batch, serialization_dir=serialization_dir.stem, message=str(error)
).debug("Saving batch that caused an error")
logger.error(error)
result = {}
finally:
if is_master:
# Construct archive in distributed training there
# because wandb hangs in distributed training mode
# and we also need to finish it manually.
best_model = serialization_dir / "best-model"
if best_model.exists():
archive_model(
serialization_dir=serialization_dir,
weights=best_model,
)
if use_wandb:
# Save archived model to wandb if exists
if best_model.exists():
wandb.save(str(serialization_dir / "model.tar.gz"))
wandb.finish()
return result
wrapper.original = func
return wrapper
def description_from_metrics(metrics: Dict[str, float]) -> str:
# Copy dict for safety
metrics = deepcopy(metrics)
# Configure loss first
loss = f"loss: {metrics.pop('loss'):.4f}, "
return loss + ", ".join([f"{name}: {value:.4f}" for name, value in metrics.items()]) + " ||"
@run_on_rank_zero
def log_metrics(
mode_str: str,
metrics: Dict[str, float],
info: Dict[str, Union[float, int, str]] = None,
) -> None:
"""
Pretty log metrics and sort them by length and alphabetic order.
Parameters
----------
mode_str : `str`, required
Mode string. Usually train or validation.
metrics : `Dict[str, float]`, required
Dictionary of metrics.
info : `Dict[str, Union[float, int, str]]`, optional (default = `None`)
Info to additionally log after and epoch.
"""
logger.info(
f"{mode_str}: info -- {', '.join([f'{k}: {v}'.lower() for k, v in info.items()])}"
if info is not None
else f"{mode_str}"
)
max_length = max(len(x) for x in metrics)
# Sort by length to make it prettier
for metric in sorted(metrics, key=lambda x: (len(x), x)):
metric_value = metrics.get(metric)
# Log only numbers to stdout as with additional loggers
# we might want to log DataFrames, distributions and etc.
if isinstance(metric_value, (float, int)):
logger.info(f"{metric.ljust(max_length)} | {metric_value:.4f}")
logger.bind(metrics={f"{mode_str.lower()}/{k}": v for k, v in metrics.items()}).debug(
"Logging metrics to additional sources."
) | vae_lm/training/utils.py | from typing import NamedTuple, Dict, Callable, Union
import os
import json
import wandb
import torch
import random
import shutil
import tarfile
import tempfile
import numpy as np
from pathlib import Path
from loguru import logger
from copy import deepcopy
from functools import wraps
import torch.distributed as dist
import vae_lm.training.ddp as ddp
from contextlib import contextmanager
from torch_nlp_utils.common import Params
from vae_lm.utils.base import run_on_rank_zero
# Modules
from vae_lm.models.base import VAELmModel
CONFIG_NAME = "config.json"
WEIGHTS_NAME = "weights.pt"
METRICS_NAME = "metrics.json"
class TorchBatchError(Exception):
"""
This exception is raised during any batch processing.
It has an attribute `batch` to get a tensor that raised an error.
"""
def __init__(self, message: str, batch: torch.Tensor):
self.message = message
self.batch = batch
def __str__(self):
return repr(self.message)
class Archive(NamedTuple):
"""An archive comprises a Model and its experimental config with metrics."""
model: VAELmModel
config: Params
metrics: Dict[str, float]
def archive_model(
serialization_dir: Path,
weights: Path,
archive_path: Path = None,
) -> None:
"""
Archive the model weights, its training configuration, and its vocabulary to `model.tar.gz`.
Parameters
----------
serialization_dir : `Path`, required
The directory where the weights and vocabulary are written out.
weights : `Path`, required
Which weights file to include in the archive. The default is `best.th`.
archive_path : `str`, optional, (default = `None`)
A full path to serialize the model to. The default is "model.tar.gz" inside the
serialization_dir. If you pass a directory here, we'll serialize the model
to "model.tar.gz" inside the directory.
"""
# Check weights
weights_file = weights / "model.pt"
if not weights_file.exists():
logger.error(f"weights file {weights_file} does not exist, unable to archive model.")
return
# Check metrics
metrics_file = weights / METRICS_NAME
if not metrics_file.exists():
logger.error(f"metrics file {metrics_file} does not exist, unable to archive model.")
return
# Check config
config_file = serialization_dir / CONFIG_NAME
if not config_file.exists():
logger.error(f"config file {config_file} does not exist, unable to archive model.")
# Check archive path
if archive_path is not None:
archive_file = archive_path
if archive_file.is_dir():
archive_file = archive_file / "model.tar.gz"
else:
archive_file = serialization_dir / "model.tar.gz"
logger.info(f"Archiving data to {archive_file}.")
with tarfile.open(archive_file, "w:gz") as archive:
archive.add(config_file, arcname=CONFIG_NAME)
archive.add(weights_file, arcname=WEIGHTS_NAME)
archive.add(metrics_file, arcname=METRICS_NAME)
archive.add(str(serialization_dir / "vocabulary"), arcname="vocabulary")
def load_archive(
archive_file: Path,
cuda_device: int = -1,
) -> Archive:
"""
Instantiates an Archive from an archived `tar.gz` file.
Parameters
----------
archive_file : `Path`, required
The archive file to load the model from.
cuda_device : `int`, optional (default = `-1`)
If `cuda_device` is >= 0, the model will be loaded onto the
corresponding GPU. Otherwise it will be loaded onto the CPU.
"""
logger.info(f"Loading archive file {archive_file}")
tempdir = None
try:
if archive_file.is_dir():
serialization_dir = archive_file
else:
with extracted_archive(archive_file, cleanup=False) as tempdir:
serialization_dir = Path(tempdir)
weights_path = serialization_dir / WEIGHTS_NAME
# Load config
config = Params.from_file(str(serialization_dir / CONFIG_NAME))
# Load metrics
with (serialization_dir / METRICS_NAME).open("r", encoding="utf-8") as file:
metrics = json.load(file)
# Instantiate model. Use a duplicate of the config, as it will get consumed.
model_params = config.duplicate()
model_params["vocabulary"] = str(serialization_dir / "vocabulary")
model = VAELmModel.load(
model_params,
weights=weights_path,
device=cuda_device,
)
finally:
if tempdir is not None:
logger.info(f"Removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
return Archive(
model=model,
config=config,
metrics=metrics,
)
@contextmanager
def extracted_archive(resolved_archive_file, cleanup=True):
tempdir = None
try:
tempdir = tempfile.mkdtemp()
logger.info(f"Extracting archive file {resolved_archive_file} to temp dir {tempdir}")
with tarfile.open(resolved_archive_file, "r:gz") as archive:
archive.extractall(tempdir)
yield tempdir
finally:
if tempdir is not None and cleanup:
logger.info(f"Removing temporary unarchived model dir at {tempdir}")
shutil.rmtree(tempdir, ignore_errors=True)
def seed_everything(seed: int) -> None:
random.seed(seed)
os.environ["PYTHONHASHSEED"] = str(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
def configure_world(func: Callable) -> Callable:
"""Decorator to configure Distributed Training world and wandb if needed for function."""
@wraps(func)
def wrapper(process_rank: int, config: Params, world_size: int = 1, **kwargs) -> None:
# Set info related to process rank
is_master = process_rank == 0
os.environ["LOCAL_RANK"] = str(process_rank)
use_wandb = config.pop("use_wandb", False)
serialization_dir = Path(config["serialization_dir"])
# Setup world for Distributed Training
if world_size > 1:
ddp.setup_world(process_rank, world_size, backend=dist.Backend.NCCL)
# Run wandb in master process
# TODO: Think about config unflat for wandb sweep to work for hyperparameters optimization.
if is_master and use_wandb:
logger.use_wandb = True
wandb.init(
project=os.getenv("WANDB_PROJECT_NAME"),
config=config.as_flat_dict(),
reinit=True,
tags=config.pop("tags"),
)
# Run function
try:
result = func(process_rank=process_rank, config=config, world_size=world_size, **kwargs)
except Exception as error:
# If it is a TorchBatchError then save it for convenience
if isinstance(error, TorchBatchError):
logger.bind(
batch=error.batch, serialization_dir=serialization_dir.stem, message=str(error)
).debug("Saving batch that caused an error")
logger.error(error)
result = {}
finally:
if is_master:
# Construct archive in distributed training there
# because wandb hangs in distributed training mode
# and we also need to finish it manually.
best_model = serialization_dir / "best-model"
if best_model.exists():
archive_model(
serialization_dir=serialization_dir,
weights=best_model,
)
if use_wandb:
# Save archived model to wandb if exists
if best_model.exists():
wandb.save(str(serialization_dir / "model.tar.gz"))
wandb.finish()
return result
wrapper.original = func
return wrapper
def description_from_metrics(metrics: Dict[str, float]) -> str:
# Copy dict for safety
metrics = deepcopy(metrics)
# Configure loss first
loss = f"loss: {metrics.pop('loss'):.4f}, "
return loss + ", ".join([f"{name}: {value:.4f}" for name, value in metrics.items()]) + " ||"
@run_on_rank_zero
def log_metrics(
mode_str: str,
metrics: Dict[str, float],
info: Dict[str, Union[float, int, str]] = None,
) -> None:
"""
Pretty log metrics and sort them by length and alphabetic order.
Parameters
----------
mode_str : `str`, required
Mode string. Usually train or validation.
metrics : `Dict[str, float]`, required
Dictionary of metrics.
info : `Dict[str, Union[float, int, str]]`, optional (default = `None`)
Info to additionally log after and epoch.
"""
logger.info(
f"{mode_str}: info -- {', '.join([f'{k}: {v}'.lower() for k, v in info.items()])}"
if info is not None
else f"{mode_str}"
)
max_length = max(len(x) for x in metrics)
# Sort by length to make it prettier
for metric in sorted(metrics, key=lambda x: (len(x), x)):
metric_value = metrics.get(metric)
# Log only numbers to stdout as with additional loggers
# we might want to log DataFrames, distributions and etc.
if isinstance(metric_value, (float, int)):
logger.info(f"{metric.ljust(max_length)} | {metric_value:.4f}")
logger.bind(metrics={f"{mode_str.lower()}/{k}": v for k, v in metrics.items()}).debug(
"Logging metrics to additional sources."
) | 0.843412 | 0.168891 |
import sys
from kikit.doc import runBoardExample, runBoardExampleJoin
from pcbnewTransition import pcbnew
# Monotonically increasing counter shared by every autoName() call below;
# it makes each generated example image/file name unique.
counter = 0


def autoName():
    """Return a fresh, unique example name of the form ``examplePanelN``.

    Each call bumps the module-level ``counter`` so consecutive examples in
    the generated document get distinct names (examplePanel1, examplePanel2, ...).
    """
    global counter
    counter = counter + 1
    return "examplePanel" + str(counter)


# Demo board every example in this document is panelized from.
SRC = "doc/resources/conn.kicad_pcb"
print(
"""
# Examples
This document will show you several examples of KiKit CLI for panelization. Note
that this is **not an exhaustive description** of everything that KiKit can do,
nor proper documentation. For further details, please refer to:
- [installation guide](installation.md)
- [description of all panelization options](panelizeCli.md)
- [more detail about KiKit's algorithm for tab creation](understandingTabs.md)
- [reference for the Python interface](panelization.md)
We will show everything on a single board located in
`doc/resources/conn.kicad_pcb`. The board looks like this when rendered via
PcbDraw:

""")
print(
"""
# Basic panels & layout
Let's start with our first panel.
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2;"],
["--tabs", "full"],
["--cuts", "vcuts"],
[SRC]])
# Explain example 1 and KiKit's component-selection semantics.
# NOTE(review): fixed mojibake "โ" characters (garbled em-dashes) and several
# typos in the emitted markdown ("This is ,however,", "have the enter",
# "take in account", "source are explicitly").
print(
"""
We specified that we want 2x2 panel, no space between board and separate them by
V-cuts. We also specified that we want to build full tabs (although no tabs are
visible in this example). This is, however, essential — if we omitted tabs, no
cuts between the boards would be performed. Note, that due to the rounded
corners, this panel cannot be manufactured. We will fix it later.
Note that the `\` in the input is there to make shell happy, so we can break our
command into multiple lines. Also note that there are single quotes around the
key-value pair — again, to make shell happy and to interpret a string with
spaces as a single option.
Note that **on Windows you have to enter the commands into KiCAD Command
Prompt** instead of the regular Command Prompt. You can find it under the Start
menu.
Also note that KiKit accepts all options in categories (e.g., `layout`, `tabs`,
`cuts`, ...). You can specify the parameters as a semicolon-separated key-value
list. To learn about the precise syntax of the CLI and about all options, please
refer to — [documentation](panelizeCli.md).
One side note — if you try it with your own board some components might be gone.
KiKit respects the KiCAD component selection criteria. When you specify an input
rectangle, only the components that **fully fit** inside the input rectangle are
selected. This however takes into account **both name and value labels** (even when
they are hidden).
When you do not specify the source area explicitly, KiKit takes the board outline
bounding box as the source area. Therefore, by default, components outside the
board substrate are not copied to panel.
Note that this is intended behavior; for once it is consistent with KiCAD
behavior of user selection and also it allows to easily ignore surrounding
comments and drawings in the board sheet (it makes no sense to have 12 same
copies of the notes around the board).
How to include the missing components?
- specify the source area explicitly to include all your components
- specify `--source 'tolerance: 10mm'` to enlarge the board outline bounding box
by e.g. 10 mm. The default value is 5 mm.
I told you that the panel above is not suitable for manufacturing. Let's see
why:
""")
# Example 2: same panel but with milling post-processing simulated, which
# reveals the unmanufacturable sharp internal corners.
runBoardExample(autoName(),
    [["panelize"],
     ["--layout", "grid; rows: 2; cols: 2;"],
     ["--tabs", "full"],
     ["--cuts", "vcuts"],
     ["--post", "millradius: 1mm"],
     [SRC]])
print("""
We specified a milling simulation post-processing. This simulates the milling
operation in the fab house. As you can see, the sharp internal corners cannot be
manufactured. I recommend you to use milling postprocessing always โ you can
easily see if your cuts are off or you have too narrow slots in your design.
Usually, one would use full tabs only for rectangular boards. Usually, when you
have rounded corners, you will use short tabs instead and add some space between
the boards. So let's fix it:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; hwidth: 10mm; vwidth: 15mm"],
["--cuts", "vcuts"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
In that way, the rounded corners can be machined. Lets' see the same example
with mousebites instead:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 5mm"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm"],
[SRC]])
# Explain the mousebite parameters, then re-run with milling simulation.
# NOTE(review): fixed mojibake "โ" dashes, added the missing closing backtick
# after `<number>in`, and corrected "0.25 mm" to "0.2 mm" to match the
# `offset: 0.2mm` actually used in the command above.
print(
"""
We changed cut type to mousebites and we specified that they should be
performed by 0.5mm holes with a spacing of 1 mm. You could also use inches if
you want — just specify `<number>in`. Since we use mousebites, we used narrower
tabs. We also specified that the cuts should be inset 0.2 mm into the board
outline. This is suitable when your board should fit into a cover — when you
break away the tabs, all burs will be inside the intended board outline.
What happens, when we simulate the milling operation?
""")
# Example 5: same mousebite panel with milling simulated — cuts come out short.
runBoardExample(autoName(),
    [["panelize"],
     ["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
     ["--tabs", "fixed; width: 5mm"],
     ["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm"],
     ["--post", "millradius: 1mm"],
     [SRC]])
# Explain why the cuts are short and demonstrate the `prolong` option.
# NOTE(review): fixed mojibake "โ" (garbled em-dash) in the emitted text.
print(
"""
See? The cuts are somewhat short. This is due to the internal corners that
cannot be milled. KiKit can fix that for you — just specify you want to prolong
your cuts tangentially by a small amount:
""")
# Example 6: tangentially prolonged mousebite cuts (prolong: 0.5mm).
runBoardExample(autoName(),
    [["panelize"],
     ["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
     ["--tabs", "fixed; width: 3mm"],
     ["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
     ["--post", "millradius: 1mm"],
     [SRC]])
print("""
If you want, you can also specify a number of tabs to generate. KiKit will place
them evenly:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
You can also append frame or rails to the panel. Frames and rail are useful in
the following situations:
- you want to assemble your board, so you need tooling holes, fiducial.
- you want to append a text to board (e.g., to identify a manufacturing batch)
- your boards are not rectangluar and you want to use V-Cuts (most manufactures
require the outer edge of the panel to be a rectangle in order to manufacture
V-Cuts)
Let's start with rails:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Similarly, you can add left and right rail via the `railslr` type. If you want
a full frame, use the type `frame`. When you place a full frame, it might make
sense to include cuts in the corner of the frame, so you can break it apart
easily. Let's see an example:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "frame; width: 5mm; space: 3mm; cuts: both"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Note that you can also use just only a vertical or horizontal frame cuts:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "frame; width: 5mm; space: 3mm; cuts: h"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
When you use V-cuts it might make sense to not remove all material, but only
mill a slot around the board of the board. This yields a stronger panel โ and
some manufacturers require such style for assembly with V-Cuts. This is achieved
via framing type `tightframe`. Note that it does not make much sense with
mousebites.
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 6mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "vcuts"],
["--framing", "tightframe; width: 5mm; space: 3mm; "],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Once we have a frame, we can append a tooling holes, fiducials and some text to
it:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--tooling", "3hole; hoffset: 2.5mm; voffset: 2.5mm; size: 1.5mm"],
["--fiducials", "3fid; hoffset: 5mm; voffset: 2.5mm; coppersize: 2mm; opening: 1mm;"],
["--text", "simple; text: yaqwsx's panel; anchor: mt; voffset: 2.5mm; hjustify: center; vjustify: center;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
There are many options for text and fiducials. Be sure to read the [full
documentation](panelizeCli.md).
If you have an automatic feeder in your PNP machine or you just dislike
sharp corners, you can add a chamfer or a fillet to the panel frame/rails:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm; fillet: 1mm"],
["--tooling", "3hole; hoffset: 2.5mm; voffset: 2.5mm; size: 1.5mm"],
["--fiducials", "3fid; hoffset: 5mm; voffset: 2.5mm; coppersize: 2mm; opening: 1mm;"],
["--post", "millradius: 1mm"],
[SRC]])
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm; chamfer: 1mm"],
["--tooling", "3hole; hoffset: 2.5mm; voffset: 2.5mm; size: 1.5mm"],
["--fiducials", "3fid; hoffset: 5mm; voffset: 2.5mm; coppersize: 2mm; opening: 1mm;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
# Advanced features & layouts
It is possible that you have some critical features you want to avoid with tabs.
KiKit has several features that can help you. Let's start with the simple ones.
First, you can rotate the boards in your layout. This might make not much sense
for rectanglar boards, but it might save you when you have circular or oddly
shaped boards:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 0mm; rotation: 45deg;"],
["--tabs", "fixed; width: 3mm;"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.75mm"],
["--framing", "frame; width: 5mm; space: 3mm; cuts: both"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
When your board has a connector sticking one one side of the board, it makes
sense to rotate the boards every other column, row or combination of both. KiKit
supports this via layout option `alternation`. You should be careful about
component references when rotating boards โ KiCAD's references have a property
"Stay upright" which makes them always face up (even when placed on a panel). So
be sure to turn it off before panelizing. Here's an example:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 3mm; alternation: cols;"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "frame; width: 5mm; space: 3mm; cuts: both"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Another solution might be to not put tabs on, e.g., vertical edges of the PCB.
However, in that case your panel might be weak for further assembly. You can
make it more stiff by including backbones โ a full piece of substrate between
the panels. Note that adding a backbone does not extend space between boards -
that's up to you. You can add either vertical, horizontal or both backbones.
Also, similarly with frames, you can put cuts on your backbone to make
depanelization of your boards easier. Enough theory, let's see an example
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; hspace: 2mm; vspace: 9mm; hbackbone: 5mm; hbonecut: true"],
["--tabs", "fixed; width: 3mm; vcount: 2; hcount: 0"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
The most powerful feature of KiKit regarding tab placement are tabs via
annotation. Remember our test board? When you open it in Pcbnew, you can see
that there are some special footprints โ KiKit's annotations:

They specify where to place tabs. You can even specify individual tab width via
text property of the symbol. How to use it? Just specify tab type to
`annotation`. We also have to increase the source area tolerance, so it can
capture the annotations.
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 5mm;"],
["--tabs", "annotation"],
["--source", "tolerance: 15mm"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Well, the panel looks strange โ right? That's because KiKit always constructs a
half-bridges. When you specify the tabs location, you have to either ensure they
match or put a piece of substrate they can reach โ e.g., a backbone or a
tightframe. If you are interested in the details, read more about tabs in
section [Understanding tabs](understandingTabs.md). Let's fix it:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 8mm; hbackbone: 3mm; vbackbone: 3mm"],
["--tabs", "annotation"],
["--source", "tolerance: 15mm"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm"],
[SRC]])
print("""
Note that the annotation can have an arbitrary orientation. The arrow just must
be outside board edge and points towards it. KiKit will also place only those
tabs, that have a neighboring substrate. For precise algorithm, see section
[understanding tabs](understandingTabs.md).
When you make flex PCBs or you want to save etchant, it make sense to pour
copper on all non-functional parts of the panel. It will make the PCB rigid. You
can do so via `copperfill` post-processing operation:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm;"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm; copperfill: true"],
[SRC]])
print("""
When you use V-cuts with `copperfill` you (or your fab house) might want to
include a clearance around the V-cuts:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; hwidth: 10mm; vwidth: 15mm"],
["--cuts", "vcuts; clearance: 1.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm; copperfill: true"],
[SRC]])
print("""
Note one last facts about V-cuts. V-cuts can only be straight and
horizontal/vertical. But you can use them with circular boards if you want by
cutting a little inside them. The option `cutcurves`, that will approximate the
cut by staring and ending point.
# I would like... but KiKit does not support it!
If you need something special; e.g., custom placement of tooling holes, multiple
texts, etc. KiKit has you covered.
The CLI interface allows you to run a custom script over the final panel. The
script can use KiKit Python interface to modify it. For the sake of simplicity,
let's add a hole in the middle of the frame. Therefore, we write the following
script:
```.py
from kikit.units import mm
from pcbnew import wxPoint
def kikitPostprocess(panel, arg):
minx, miny, maxx, maxy = panel.panelBBox()
position = wxPoint((minx + maxx) / 2, miny + 2 * mm)
panel.addNPTHole(position, 3 * mm)
```
Then run KiKit:
""")
runBoardExample(autoName(),
[["panelize"],
["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
["--tabs", "fixed; width: 3mm; vcount: 2"],
["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
["--framing", "railstb; width: 5mm; space: 3mm;"],
["--post", "millradius: 1mm; script: doc/resources/examplePost.py"],
[SRC]])
print("""
You can learn more about available functions from the comment in the source code
or in [documentation](panelization.md).
If you implement a feature that your fab house requires (e.g., new tooling hole
type), consider submitting a pull request for KiKit instead. I believe the
others will benefit from it.
# Managing presets
The last section of this document is dedicated to management of presets. You can
read the specification in the [documentation for CLI](panelizeCli.md). Here I
would like to focus on practical examples.
As you should know from the documentation, the panelization preset is divided
into sections; e. g., `layout`, `tabs`, etc. The key-value parameters in these
sections can be specified via JSON files. In KiKit, you can specify these files
via `-p` option:
```
kikit panelize -p myPreset.json -p :<builtInPreset> <other parameters>
```
The parameters in the later specified presets override the parameters in the
previously specified presets. This allows you to define a named piece-wise
presets. Therefore, you can prepare various presets for mousebites โ e.g.,
`fineMousebites.json` and `coarseMousebites.json`:
```.js
// fineMousebites.json
{
"cuts": {
"type": "mousebites",
"drill": "0.5mm",
"spacing": "0.9mm",
"offset": "0.25mm"
}
}
// coarseMousebites.json
{
"cuts": {
"type": "mousebites",
"drill": "0.3mm",
"spacing": "0.2mm",
"offset": "0.15mm"
}
}
```
Then you can specify your panelization commands easily via:
```
kikit panelize -p fineMousebites.json <otheroptions>
```
Therefore, you can build a custom library of commonly used-options; e.g., per
fabrication house. KiKit offers some built-in presets โ see
[`panelizePresets`](../kikit/resources/panelizePresets). Note that the built-in
preset `default.json` is always used as a base and it specifies conservative
default values so you can only override the options relevant for you.
To give you an example โ with KiKit, you will no longer have to remember what
diameter of tooling holes JLC PCB requires, just use:
```
kikit panelize -p :jlcTooling <otheroptions>
```
""")
runBoardExampleJoin() | scripts/exampleDoc.py |
Then run KiKit:
""")
# NOTE(review): duplicated fragment (truncated copy of the script above in this
# dump).  Renders the custom post-processing-script example.
runBoardExample(autoName(),
    [["panelize"],
     ["--layout", "grid; rows: 2; cols: 2; space: 2mm"],
     ["--tabs", "fixed; width: 3mm; vcount: 2"],
     ["--cuts", "mousebites; drill: 0.5mm; spacing: 1mm; offset: 0.2mm; prolong: 0.5mm"],
     ["--framing", "railstb; width: 5mm; space: 3mm;"],
     ["--post", "millradius: 1mm; script: doc/resources/examplePost.py"],
     [SRC]])
print("""
You can learn more about available functions from the comment in the source code
or in [documentation](panelization.md).
If you implement a feature that your fab house requires (e.g., new tooling hole
type), consider submitting a pull request for KiKit instead. I believe the
others will benefit from it.
# Managing presets
The last section of this document is dedicated to management of presets. You can
read the specification in the [documentation for CLI](panelizeCli.md). Here I
would like to focus on practical examples.
As you should know from the documentation, the panelization preset is divided
into sections; e. g., `layout`, `tabs`, etc. The key-value parameters in these
sections can be specified via JSON files. In KiKit, you can specify these files
via `-p` option:
The parameters in the later specified presets override the parameters in the
previously specified presets. This allows you to define a named piece-wise
presets. Therefore, you can prepare various presets for mousebites โ e.g.,
`fineMousebites.json` and `coarseMousebites.json`:
Then you can specify your panelization commands easily via:
Therefore, you can build a custom library of commonly used-options; e.g., per
fabrication house. KiKit offers some built-in presets โ see
[`panelizePresets`](../kikit/resources/panelizePresets). Note that the built-in
preset `default.json` is always used as a base and it specifies conservative
default values so you can only override the options relevant for you.
To give you an example โ with KiKit, you will no longer have to remember what
diameter of tooling holes JLC PCB requires, just use:
| 0.556761 | 0.660898 |
"""Tests for the unconstrained BFGS optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from scipy.stats import special_ortho_group
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
def _make_val_and_grad_fn(value_fn):
    """Decorator: make `value_fn` return both its value and its gradient.

    The returned callable has the `(value, gradient)` signature expected by
    `tfp.optimizer.bfgs_minimize`.
    """
    @functools.wraps(value_fn)
    def wrapped(position):
        return tfp.math.value_and_gradient(value_fn, position)
    return wrapped
def _norm(x):
    """Return the supremum (infinity) norm of `x`."""
    values = np.asarray(x)
    return np.linalg.norm(values, ord=np.inf)
@test_util.run_all_in_graph_and_eager_modes
class BfgsTest(tf.test.TestCase):
    """Tests for the BFGS optimization algorithm.

    Each test minimizes a known objective with `tfp.optimizer.bfgs_minimize`
    and checks convergence, the gradient norm at the solution, and the found
    position.  NumPy seeds are fixed so every problem instance is
    deterministic.
    """

    def test_quadratic_bowl_2d(self):
        """Can minimize a two dimensional quadratic function."""
        minimum = np.array([1.0, 1.0])
        scales = np.array([2.0, 3.0])

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        start = tf.constant([0.6, 0.8])
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            quadratic, initial_position=start, tolerance=1e-8))
        self.assertTrue(results.converged)
        # The sup-norm of the final gradient should meet the requested
        # tolerance.
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-8)
        self.assertArrayNear(results.position, minimum, 1e-5)

    def test_inverse_hessian_spec(self):
        """Checks that specifying the 'initial_inverse_hessian_estimate' works."""
        minimum = np.array([1.0, 1.0], dtype=np.float32)
        scales = np.array([2.0, 3.0], dtype=np.float32)

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        start = tf.constant([0.6, 0.8])
        # A symmetric positive-definite matrix: a valid inverse Hessian guess.
        test_inv_hessian = tf.constant([[2.0, 1.0], [1.0, 2.0]],
                                       dtype=tf.float32)
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            quadratic, initial_position=start, tolerance=1e-8,
            initial_inverse_hessian_estimate=test_inv_hessian))
        self.assertTrue(results.converged)
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-8)
        self.assertArrayNear(results.position, minimum, 1e-5)

    def test_bad_inverse_hessian_spec(self):
        """Checks that specifying a non-positive definite inverse hessian fails."""
        minimum = np.array([1.0, 1.0], dtype=np.float32)
        scales = np.array([2.0, 3.0], dtype=np.float32)

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        start = tf.constant([0.6, 0.8])
        # Negative-definite matrix: must be rejected by the validity check.
        bad_inv_hessian = tf.constant([[-2.0, 1.0], [1.0, -2.0]],
                                      dtype=tf.float32)
        with self.assertRaises(tf.errors.InvalidArgumentError):
            self.evaluate(tfp.optimizer.bfgs_minimize(
                quadratic, initial_position=start, tolerance=1e-8,
                initial_inverse_hessian_estimate=bad_inv_hessian))

    def test_asymmetric_inverse_hessian_spec(self):
        """Checks that specifying a asymmetric inverse hessian fails."""
        minimum = np.array([1.0, 1.0], dtype=np.float32)
        scales = np.array([2.0, 3.0], dtype=np.float32)

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        start = tf.constant([0.6, 0.8])
        # Non-symmetric matrix: must be rejected by the validity check.
        bad_inv_hessian = tf.constant([[2.0, 0.0], [1.0, 2.0]],
                                      dtype=tf.float32)
        with self.assertRaises(tf.errors.InvalidArgumentError):
            self.evaluate(tfp.optimizer.bfgs_minimize(
                quadratic, initial_position=start, tolerance=1e-8,
                initial_inverse_hessian_estimate=bad_inv_hessian))

    def test_quadratic_bowl_10d(self):
        """Can minimize a ten dimensional quadratic function."""
        dim = 10
        np.random.seed(14159)
        minimum = np.random.randn(dim)
        scales = np.exp(np.random.randn(dim))

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        start = tf.ones_like(minimum)
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            quadratic, initial_position=start, tolerance=1e-8))
        self.assertTrue(results.converged)
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-8)
        self.assertArrayNear(results.position, minimum, 1e-5)

    def test_quadratic_with_skew(self):
        """Can minimize a general quadratic function."""
        dim = 3
        np.random.seed(26535)
        minimum = np.random.randn(dim)
        # Random SPD Hessian: rotate a positive diagonal by a random rotation.
        principal_values = np.diag(np.exp(np.random.randn(dim)))
        rotation = special_ortho_group.rvs(dim)
        hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))

        @_make_val_and_grad_fn
        def quadratic(x):
            y = x - minimum
            yp = tf.tensordot(hessian, y, axes=[1, 0])
            return tf.reduce_sum(input_tensor=y * yp) / 2

        start = tf.ones_like(minimum)
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            quadratic, initial_position=start, tolerance=1e-8))
        self.assertTrue(results.converged)
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-8)
        self.assertArrayNear(results.position, minimum, 1e-5)

    def test_quadratic_with_strong_skew(self):
        """Can minimize a strongly skewed quadratic function."""
        np.random.seed(89793)
        minimum = np.random.randn(3)
        # Condition number ~500 (0.1 vs 50.0) makes the bowl strongly skewed.
        principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
        rotation = special_ortho_group.rvs(3)
        hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))

        @_make_val_and_grad_fn
        def quadratic(x):
            y = x - minimum
            yp = tf.tensordot(hessian, y, axes=[1, 0])
            return tf.reduce_sum(input_tensor=y * yp) / 2

        start = tf.ones_like(minimum)
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            quadratic, initial_position=start, tolerance=1e-8))
        self.assertTrue(results.converged)
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-8)
        self.assertArrayNear(results.position, minimum, 1e-5)

    def test_rosenbrock_2d(self):
        """Tests BFGS on the Rosenbrock function.

        The Rosenbrock function is a standard optimization test case. In two
        dimensions, the function is (a, b > 0):
          f(x, y) = (a - x)^2 + b (y - x^2)^2
        The function has a global minimum at (a, a^2). This minimum lies inside
        a parabolic valley (y = x^2).
        """
        def rosenbrock(coord):
            """The Rosenbrock function in two dimensions with a=1, b=100.

            Args:
              coord: A Tensor of shape [2]. The coordinate of the point to
                evaluate the function at.

            Returns:
              fv: A scalar tensor containing the value of the Rosenbrock
                function at the supplied point.
              dfx: Scalar tensor. The derivative of the function with respect
                to x.
              dfy: Scalar tensor. The derivative of the function with respect
                to y.
            """
            x, y = coord[0], coord[1]
            fv = (1 - x)**2 + 100 * (y - x**2)**2
            dfx = 2 * (x - 1) + 400 * x * (x**2 - y)
            dfy = 200 * (y - x**2)
            return fv, tf.stack([dfx, dfy])

        start = tf.constant([-1.2, 1.0])
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            rosenbrock, initial_position=start, tolerance=1e-5))
        self.assertTrue(results.converged)
        final_gradient = results.objective_gradient
        final_gradient_norm = _norm(final_gradient)
        self.assertLessEqual(final_gradient_norm, 1e-5)
        self.assertArrayNear(results.position, np.array([1.0, 1.0]), 1e-5)

    # TODO(b/116767573): Also run in eager mode but as a separate test,
    # otherwise it takes too long to run.
    def test_himmelblau(self):
        """Tests minimization on the Himmelblau's function.

        Himmelblau's function is a standard optimization test case. The
        function is given by:
          f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2
        The function has four minima located at (3, 2), (-2.805118, 3.131312),
        (-3.779310, -3.283186), (3.584428, -1.848126).
        All these minima may be reached from appropriate starting points.
        """
        @_make_val_and_grad_fn
        def himmelblau(coord):
            x, y = coord[0], coord[1]
            return (x * x + y - 11) ** 2 + (x + y * y - 7) ** 2

        # The evaluation counts are regression values pinned for the current
        # line-search implementation.
        starts_and_targets = [
            # Start Point, Target Minimum, Num evaluations expected.
            [(1, 1), (3, 2), 30],
            [(-2, 2), (-2.805118, 3.131312), 23],
            [(-1, -1), (-3.779310, -3.283186), 29],
            [(1, -2), (3.584428, -1.848126), 28]
        ]
        dtype = "float64"
        for start, expected_minima, expected_evals in starts_and_targets:
            start = tf.constant(start, dtype=dtype)
            results = self.evaluate(tfp.optimizer.bfgs_minimize(
                himmelblau, initial_position=start, tolerance=1e-8))
            self.assertTrue(results.converged)
            self.assertArrayNear(results.position,
                                 np.array(expected_minima, dtype=dtype),
                                 1e-5)
            self.assertEqual(results.num_objective_evaluations, expected_evals)

    def test_data_fitting(self):
        """Tests MLE estimation for a simple geometric GLM."""
        n, dim = 100, 3
        dtype = tf.float64
        np.random.seed(234095)
        x = np.random.choice([0, 1], size=[dim, n])
        s = 0.01 * np.sum(x, 0)
        p = 1. / (1 + np.exp(-s))
        y = np.random.geometric(p)
        x_data = tf.convert_to_tensor(value=x, dtype=dtype)
        y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)

        @_make_val_and_grad_fn
        def neg_log_likelihood(state):
            state_ext = tf.expand_dims(state, 0)
            linear_part = tf.matmul(state_ext, x_data)
            linear_part_ex = tf.stack([tf.zeros_like(linear_part),
                                       linear_part], axis=0)
            term1 = tf.squeeze(
                tf.matmul(
                    tf.reduce_logsumexp(input_tensor=linear_part_ex, axis=0),
                    y_data),
                -1)
            # term2 includes a Gaussian prior term (0.5 * ||state||^2).
            term2 = (
                0.5 * tf.reduce_sum(input_tensor=state_ext * state_ext, axis=-1) -
                tf.reduce_sum(input_tensor=linear_part, axis=-1))
            return tf.squeeze(term1 + term2)

        start = tf.ones(shape=[dim], dtype=dtype)
        results = self.evaluate(tfp.optimizer.bfgs_minimize(
            neg_log_likelihood, initial_position=start, tolerance=1e-6))
        # Regression values for the fixed seed above.
        expected_minima = np.array(
            [-0.020460034354, 0.171708568111, 0.021200423717], dtype="float64")
        expected_evals = 19
        self.assertArrayNear(results.position, expected_minima, 1e-6)
        self.assertEqual(results.num_objective_evaluations, expected_evals)

    # TODO(b/116767573): Also run in eager mode but as a separate test,
    # otherwise it takes too long to run.
    def test_determinism(self):
        """Tests that the results are deterministic."""
        dim = 5

        @_make_val_and_grad_fn
        def rastrigin(x):
            """The value and gradient of the Rastrigin function.

            The Rastrigin function is a standard optimization test case. It is
            a multimodal non-convex function. While it has a large number of
            local minima, the global minimum is located at the origin and
            where the function value is zero. The standard search domain for
            optimization problems is the hypercube [-5.12, 5.12]**d in
            d-dimensions.

            Args:
              x: Real `Tensor` of shape [dim]. The position at which to
                evaluate the function.

            Returns:
              value_and_gradient: A tuple of two `Tensor`s containing
                value: A scalar `Tensor` of the function value at the supplied
                  point.
                gradient: A `Tensor` of shape [dim] containing the gradient of
                  the function along the axes.
            """
            return tf.reduce_sum(input_tensor=x**2 -
                                 10.0 * tf.cos(2 * np.pi * x)) + 10.0 * dim

        start_position = np.random.rand(dim) * 2.0 * 5.12 - 5.12

        def get_results():
            start = tf.constant(start_position)
            return self.evaluate(tfp.optimizer.bfgs_minimize(
                rastrigin, initial_position=start, tolerance=1e-5))

        # Two runs from the same start must agree field-by-field.
        res1, res2 = get_results(), get_results()
        self.assertTrue(res1.converged)
        self.assertEqual(res1.converged, res2.converged)
        self.assertEqual(res1.failed, res2.failed)
        self.assertEqual(res1.num_objective_evaluations,
                         res2.num_objective_evaluations)
        self.assertArrayNear(res1.position, res2.position, 1e-5)
        self.assertAlmostEqual(res1.objective_value, res2.objective_value)
        self.assertArrayNear(res1.objective_gradient, res2.objective_gradient,
                             1e-5)
        self.assertArrayNear(res1.inverse_hessian_estimate.reshape([-1]),
                             res2.inverse_hessian_estimate.reshape([-1]), 1e-5)

    def test_dynamic_shapes(self):
        """Can build a bfgs_op with dynamic shapes in graph mode."""
        # Placeholders only exist in graph mode; skip under eager execution.
        if tf.executing_eagerly(): return
        minimum = np.array([1.0, 1.0])
        scales = np.array([2.0, 3.0])

        @_make_val_and_grad_fn
        def quadratic(x):
            return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)

        # Test with a vector of unknown dimension, and a fully unknown shape.
        for shape in ([None], None):
            start = tf.compat.v1.placeholder(tf.float32, shape=shape)
            bfgs_op = tfp.optimizer.bfgs_minimize(
                quadratic, initial_position=start, tolerance=1e-8)
            self.assertFalse(bfgs_op.position.shape.is_fully_defined())
            with self.cached_session() as session:
                results = session.run(bfgs_op, feed_dict={start: [0.6, 0.8]})
            self.assertTrue(results.converged)
            self.assertLessEqual(_norm(results.objective_gradient), 1e-8)
            self.assertArrayNear(results.position, minimum, 1e-5)
if __name__ == "__main__":
tf.test.main() | tensorflow_probability/python/optimizer/bfgs_test.py | """Tests for the unconstrained BFGS optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import numpy as np
from scipy.stats import special_ortho_group
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import,g-import-not-at-top
def _make_val_and_grad_fn(value_fn):
    """Decorator: make `value_fn` return both its value and its gradient.

    The returned callable has the `(value, gradient)` signature expected by
    `tfp.optimizer.bfgs_minimize`.
    """
    @functools.wraps(value_fn)
    def wrapped(position):
        return tfp.math.value_and_gradient(value_fn, position)
    return wrapped
def _norm(x):
    """Return the supremum (infinity) norm of `x`."""
    values = np.asarray(x)
    return np.linalg.norm(values, ord=np.inf)
@test_util.run_all_in_graph_and_eager_modes
class BfgsTest(tf.test.TestCase):
"""Tests for BFGS optimization algorithm."""
def test_quadratic_bowl_2d(self):
"""Can minimize a two dimensional quadratic function."""
minimum = np.array([1.0, 1.0])
scales = np.array([2.0, 3.0])
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
start = tf.constant([0.6, 0.8])
results = self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
self.assertTrue(final_gradient_norm <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_inverse_hessian_spec(self):
"""Checks that specifying the 'initial_inverse_hessian_estimate' works."""
minimum = np.array([1.0, 1.0], dtype=np.float32)
scales = np.array([2.0, 3.0], dtype=np.float32)
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
start = tf.constant([0.6, 0.8])
test_inv_hessian = tf.constant([[2.0, 1.0], [1.0, 2.0]],
dtype=np.float32)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8,
initial_inverse_hessian_estimate=test_inv_hessian))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
self.assertTrue(final_gradient_norm <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_bad_inverse_hessian_spec(self):
"""Checks that specifying a non-positive definite inverse hessian fails."""
minimum = np.array([1.0, 1.0], dtype=np.float32)
scales = np.array([2.0, 3.0], dtype=np.float32)
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
start = tf.constant([0.6, 0.8])
bad_inv_hessian = tf.constant([[-2.0, 1.0], [1.0, -2.0]],
dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8,
initial_inverse_hessian_estimate=bad_inv_hessian))
def test_asymmetric_inverse_hessian_spec(self):
"""Checks that specifying a asymmetric inverse hessian fails."""
minimum = np.array([1.0, 1.0], dtype=np.float32)
scales = np.array([2.0, 3.0], dtype=np.float32)
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
start = tf.constant([0.6, 0.8])
bad_inv_hessian = tf.constant([[2.0, 0.0], [1.0, 2.0]],
dtype=tf.float32)
with self.assertRaises(tf.errors.InvalidArgumentError):
self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8,
initial_inverse_hessian_estimate=bad_inv_hessian))
def test_quadratic_bowl_10d(self):
"""Can minimize a ten dimensional quadratic function."""
dim = 10
np.random.seed(14159)
minimum = np.random.randn(dim)
scales = np.exp(np.random.randn(dim))
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
start = tf.ones_like(minimum)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
self.assertTrue(final_gradient_norm <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_quadratic_with_skew(self):
"""Can minimize a general quadratic function."""
dim = 3
np.random.seed(26535)
minimum = np.random.randn(dim)
principal_values = np.diag(np.exp(np.random.randn(dim)))
rotation = special_ortho_group.rvs(dim)
hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
@_make_val_and_grad_fn
def quadratic(x):
y = x - minimum
yp = tf.tensordot(hessian, y, axes=[1, 0])
return tf.reduce_sum(input_tensor=y * yp) / 2
start = tf.ones_like(minimum)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
self.assertTrue(final_gradient_norm <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_quadratic_with_strong_skew(self):
"""Can minimize a strongly skewed quadratic function."""
np.random.seed(89793)
minimum = np.random.randn(3)
principal_values = np.diag(np.array([0.1, 2.0, 50.0]))
rotation = special_ortho_group.rvs(3)
hessian = np.dot(np.transpose(rotation), np.dot(principal_values, rotation))
@_make_val_and_grad_fn
def quadratic(x):
y = x - minimum
yp = tf.tensordot(hessian, y, axes=[1, 0])
return tf.reduce_sum(input_tensor=y * yp) / 2
start = tf.ones_like(minimum)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
print (final_gradient_norm)
self.assertTrue(final_gradient_norm <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
def test_rosenbrock_2d(self):
"""Tests BFGS on the Rosenbrock function.
The Rosenbrock function is a standard optimization test case. In two
dimensions, the function is (a, b > 0):
f(x, y) = (a - x)^2 + b (y - x^2)^2
The function has a global minimum at (a, a^2). This minimum lies inside
a parabolic valley (y = x^2).
"""
def rosenbrock(coord):
"""The Rosenbrock function in two dimensions with a=1, b=100.
Args:
coord: A Tensor of shape [2]. The coordinate of the point to evaluate
the function at.
Returns:
fv: A scalar tensor containing the value of the Rosenbrock function at
the supplied point.
dfx: Scalar tensor. The derivative of the function with respect to x.
dfy: Scalar tensor. The derivative of the function with respect to y.
"""
x, y = coord[0], coord[1]
fv = (1 - x)**2 + 100 * (y - x**2)**2
dfx = 2 * (x - 1) + 400 * x * (x**2 - y)
dfy = 200 * (y - x**2)
return fv, tf.stack([dfx, dfy])
start = tf.constant([-1.2, 1.0])
results = self.evaluate(tfp.optimizer.bfgs_minimize(
rosenbrock, initial_position=start, tolerance=1e-5))
self.assertTrue(results.converged)
final_gradient = results.objective_gradient
final_gradient_norm = _norm(final_gradient)
self.assertTrue(final_gradient_norm <= 1e-5)
self.assertArrayNear(results.position, np.array([1.0, 1.0]), 1e-5)
# TODO(b/116767573): Also run in eager mode but as a separate test, otherwise
# it takes too long to run.
def test_himmelblau(self):
"""Tests minimization on the Himmelblau's function.
Himmelblau's function is a standard optimization test case. The function is
given by:
f(x, y) = (x^2 + y - 11)^2 + (x + y^2 - 7)^2
The function has four minima located at (3, 2), (-2.805118, 3.131312),
(-3.779310, -3.283186), (3.584428, -1.848126).
All these minima may be reached from appropriate starting points.
"""
@_make_val_and_grad_fn
def himmelblau(coord):
x, y = coord[0], coord[1]
return (x * x + y - 11) ** 2 + (x + y * y - 7) ** 2
starts_and_targets = [
# Start Point, Target Minimum, Num evaluations expected.
[(1, 1), (3, 2), 30],
[(-2, 2), (-2.805118, 3.131312), 23],
[(-1, -1), (-3.779310, -3.283186), 29],
[(1, -2), (3.584428, -1.848126), 28]
]
dtype = "float64"
for start, expected_minima, expected_evals in starts_and_targets:
start = tf.constant(start, dtype=dtype)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
himmelblau, initial_position=start, tolerance=1e-8))
print (results)
self.assertTrue(results.converged)
self.assertArrayNear(results.position,
np.array(expected_minima, dtype=dtype),
1e-5)
self.assertEqual(results.num_objective_evaluations, expected_evals)
def test_data_fitting(self):
"""Tests MLE estimation for a simple geometric GLM."""
n, dim = 100, 3
dtype = tf.float64
np.random.seed(234095)
x = np.random.choice([0, 1], size=[dim, n])
s = 0.01 * np.sum(x, 0)
p = 1. / (1 + np.exp(-s))
y = np.random.geometric(p)
x_data = tf.convert_to_tensor(value=x, dtype=dtype)
y_data = tf.expand_dims(tf.convert_to_tensor(value=y, dtype=dtype), -1)
@_make_val_and_grad_fn
def neg_log_likelihood(state):
state_ext = tf.expand_dims(state, 0)
linear_part = tf.matmul(state_ext, x_data)
linear_part_ex = tf.stack([tf.zeros_like(linear_part),
linear_part], axis=0)
term1 = tf.squeeze(
tf.matmul(
tf.reduce_logsumexp(input_tensor=linear_part_ex, axis=0), y_data),
-1)
term2 = (
0.5 * tf.reduce_sum(input_tensor=state_ext * state_ext, axis=-1) -
tf.reduce_sum(input_tensor=linear_part, axis=-1))
return tf.squeeze(term1 + term2)
start = tf.ones(shape=[dim], dtype=dtype)
results = self.evaluate(tfp.optimizer.bfgs_minimize(
neg_log_likelihood, initial_position=start, tolerance=1e-6))
expected_minima = np.array(
[-0.020460034354, 0.171708568111, 0.021200423717], dtype="float64")
expected_evals = 19
self.assertArrayNear(results.position, expected_minima, 1e-6)
self.assertEqual(results.num_objective_evaluations, expected_evals)
# TODO(b/116767573): Also run in eager mode but as a separate test, otherwise
# it takes too long to run.
def test_determinism(self):
"""Tests that the results are determinsitic."""
dim = 5
@_make_val_and_grad_fn
def rastrigin(x):
"""The value and gradient of the Rastrigin function.
The Rastrigin function is a standard optimization test case. It is a
multimodal non-convex function. While it has a large number of local
minima, the global minimum is located at the origin and where the function
value is zero. The standard search domain for optimization problems is the
hypercube [-5.12, 5.12]**d in d-dimensions.
Args:
x: Real `Tensor` of shape [2]. The position at which to evaluate the
function.
Returns:
value_and_gradient: A tuple of two `Tensor`s containing
value: A scalar `Tensor` of the function value at the supplied point.
gradient: A `Tensor` of shape [2] containing the gradient of the
function along the two axes.
"""
return tf.reduce_sum(input_tensor=x**2 -
10.0 * tf.cos(2 * np.pi * x)) + 10.0 * dim
start_position = np.random.rand(dim) * 2.0 * 5.12 - 5.12
def get_results():
start = tf.constant(start_position)
return self.evaluate(tfp.optimizer.bfgs_minimize(
rastrigin, initial_position=start, tolerance=1e-5))
res1, res2 = get_results(), get_results()
self.assertTrue(res1.converged)
self.assertEqual(res1.converged, res2.converged)
self.assertEqual(res1.failed, res2.failed)
self.assertEqual(res1.num_objective_evaluations,
res2.num_objective_evaluations)
self.assertArrayNear(res1.position, res2.position, 1e-5)
self.assertAlmostEqual(res1.objective_value, res2.objective_value)
self.assertArrayNear(res1.objective_gradient, res2.objective_gradient, 1e-5)
self.assertArrayNear(res1.inverse_hessian_estimate.reshape([-1]),
res2.inverse_hessian_estimate.reshape([-1]), 1e-5)
def test_dynamic_shapes(self):
"""Can build a bfgs_op with dynamic shapes in graph mode."""
if tf.executing_eagerly(): return
minimum = np.array([1.0, 1.0])
scales = np.array([2.0, 3.0])
@_make_val_and_grad_fn
def quadratic(x):
return tf.reduce_sum(input_tensor=scales * (x - minimum)**2)
# Test with a vector of unknown dimension, and a fully unknown shape.
for shape in ([None], None):
start = tf.compat.v1.placeholder(tf.float32, shape=shape)
bfgs_op = tfp.optimizer.bfgs_minimize(
quadratic, initial_position=start, tolerance=1e-8)
self.assertFalse(bfgs_op.position.shape.is_fully_defined())
with self.cached_session() as session:
results = session.run(bfgs_op, feed_dict={start: [0.6, 0.8]})
self.assertTrue(results.converged)
self.assertTrue(_norm(results.objective_gradient) <= 1e-8)
self.assertArrayNear(results.position, minimum, 1e-5)
if __name__ == "__main__":
tf.test.main() | 0.94001 | 0.531209 |
import time
import webbrowser as web
from datetime import datetime
from typing import Optional
from urllib.parse import quote
import pyautogui
import pyautogui as pg
from pywhatkit.core import core, exceptions, log
import winsound
pg.FAILSAFE = False
core.check_connection()
WAIT_TO_LOAD_WEB = 8
WAIT_TO_LOAD_APP = 3
def sendwhatmsg_instantly(
phone_no: str,
message: str,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
use_whatsapp_app: bool = False,
) -> None:
"""Send WhatsApp Message Instantly"""
wait_to_load = 0
if not core.check_number(number=phone_no):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
if use_whatsapp_app:
web.open(f"https://api.whatsapp.com/send?phone={phone_no}&text={quote(message)}")
wait_to_load = WAIT_TO_LOAD_APP
else:
web.open(f"https://web.whatsapp.com/send?phone={phone_no}&text={quote(message)}")
wait_to_load = WAIT_TO_LOAD_WEB
time.sleep(wait_to_load)
# frequency = 2500 # Set Frequency To 2500 Hertz
# duration = 400 # Set Duration To 1000 ms == 1 second
# winsound.Beep(frequency, duration)
pg.click(core.WIDTH / 2, core.HEIGHT / 2)
pyautogui.getActiveWindow().maximize()
time.sleep(wait_time - wait_to_load)
pg.press("enter")
pg.press("enter")
time.sleep(0.3)
pg.click(core.WIDTH / 2, core.HEIGHT / 2)
log.log_message(_time=time.localtime(), receiver=phone_no, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg(
phone_no: str,
message: str,
time_hour: int,
time_min: int,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send a WhatsApp Message at a Certain Time"""
if not core.check_number(number=phone_no):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
if time_hour not in range(25) or time_min not in range(60):
raise Warning("Invalid Time Format!")
current_time = time.localtime()
left_time = datetime.strptime(
f"{time_hour}:{time_min}:0", "%H:%M:%S"
) - datetime.strptime(
f"{current_time.tm_hour}:{current_time.tm_min}:{current_time.tm_sec}",
"%H:%M:%S",
)
if left_time.seconds < wait_time:
raise exceptions.CallTimeException(
"Call Time must be Greater than Wait Time as WhatsApp Web takes some Time to Load!"
)
sleep_time = left_time.seconds - wait_time
print(
f"In {sleep_time} Seconds WhatsApp will open and after {wait_time} Seconds Message will be Delivered!"
)
time.sleep(sleep_time)
core.send_message(message=message, receiver=phone_no, wait_time=wait_time)
log.log_message(_time=current_time, receiver=phone_no, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg_to_group(
group_id: str,
message: str,
time_hour: int,
time_min: int,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send WhatsApp Message to a Group at a Certain Time"""
if time_hour not in range(25) or time_min not in range(60):
raise Warning("Invalid Time Format!")
current_time = time.localtime()
left_time = datetime.strptime(
f"{time_hour}:{time_min}:0", "%H:%M:%S"
) - datetime.strptime(
f"{current_time.tm_hour}:{current_time.tm_min}:{current_time.tm_sec}",
"%H:%M:%S",
)
if left_time.seconds < wait_time:
raise exceptions.CallTimeException(
"Call Time must be Greater than Wait Time as WhatsApp Web takes some Time to Load!"
)
sleep_time = left_time.seconds - wait_time
print(
f"In {sleep_time} Seconds WhatsApp will open and after {wait_time} Seconds Message will be Delivered!"
)
time.sleep(sleep_time)
core.send_message(message=message, receiver=group_id, wait_time=wait_time)
log.log_message(_time=current_time, receiver=group_id, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg_to_group_instantly(
group_id: str,
message: str,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send WhatsApp Message to a Group Instantly"""
current_time = time.localtime()
time.sleep(sleep_time)
core.send_message(message=message, receiver=group_id, wait_time=wait_time)
log.log_message(_time=current_time, receiver=group_id, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhats_image(
receiver: str,
img_path: str,
caption: str = "",
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send Image to a WhatsApp Contact or Group at a Certain Time"""
if (not receiver.isalnum()) and (not core.check_number(number=receiver)):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
current_time = time.localtime()
core.send_image(
path=img_path, caption=caption, receiver=receiver, wait_time=wait_time
)
log.log_image(_time=current_time, path=img_path, receiver=receiver, caption=caption)
if tab_close:
core.close_tab(wait_time=close_time)
def open_web() -> bool:
"""Opens WhatsApp Web"""
try:
web.open("https://web.whatsapp.com")
except web.Error:
return False
else:
return True | pywhatkit/whats.py | import time
import webbrowser as web
from datetime import datetime
from typing import Optional
from urllib.parse import quote
import pyautogui
import pyautogui as pg
from pywhatkit.core import core, exceptions, log
import winsound
pg.FAILSAFE = False
core.check_connection()
WAIT_TO_LOAD_WEB = 8
WAIT_TO_LOAD_APP = 3
def sendwhatmsg_instantly(
phone_no: str,
message: str,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
use_whatsapp_app: bool = False,
) -> None:
"""Send WhatsApp Message Instantly"""
wait_to_load = 0
if not core.check_number(number=phone_no):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
if use_whatsapp_app:
web.open(f"https://api.whatsapp.com/send?phone={phone_no}&text={quote(message)}")
wait_to_load = WAIT_TO_LOAD_APP
else:
web.open(f"https://web.whatsapp.com/send?phone={phone_no}&text={quote(message)}")
wait_to_load = WAIT_TO_LOAD_WEB
time.sleep(wait_to_load)
# frequency = 2500 # Set Frequency To 2500 Hertz
# duration = 400 # Set Duration To 1000 ms == 1 second
# winsound.Beep(frequency, duration)
pg.click(core.WIDTH / 2, core.HEIGHT / 2)
pyautogui.getActiveWindow().maximize()
time.sleep(wait_time - wait_to_load)
pg.press("enter")
pg.press("enter")
time.sleep(0.3)
pg.click(core.WIDTH / 2, core.HEIGHT / 2)
log.log_message(_time=time.localtime(), receiver=phone_no, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg(
phone_no: str,
message: str,
time_hour: int,
time_min: int,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send a WhatsApp Message at a Certain Time"""
if not core.check_number(number=phone_no):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
if time_hour not in range(25) or time_min not in range(60):
raise Warning("Invalid Time Format!")
current_time = time.localtime()
left_time = datetime.strptime(
f"{time_hour}:{time_min}:0", "%H:%M:%S"
) - datetime.strptime(
f"{current_time.tm_hour}:{current_time.tm_min}:{current_time.tm_sec}",
"%H:%M:%S",
)
if left_time.seconds < wait_time:
raise exceptions.CallTimeException(
"Call Time must be Greater than Wait Time as WhatsApp Web takes some Time to Load!"
)
sleep_time = left_time.seconds - wait_time
print(
f"In {sleep_time} Seconds WhatsApp will open and after {wait_time} Seconds Message will be Delivered!"
)
time.sleep(sleep_time)
core.send_message(message=message, receiver=phone_no, wait_time=wait_time)
log.log_message(_time=current_time, receiver=phone_no, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg_to_group(
group_id: str,
message: str,
time_hour: int,
time_min: int,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send WhatsApp Message to a Group at a Certain Time"""
if time_hour not in range(25) or time_min not in range(60):
raise Warning("Invalid Time Format!")
current_time = time.localtime()
left_time = datetime.strptime(
f"{time_hour}:{time_min}:0", "%H:%M:%S"
) - datetime.strptime(
f"{current_time.tm_hour}:{current_time.tm_min}:{current_time.tm_sec}",
"%H:%M:%S",
)
if left_time.seconds < wait_time:
raise exceptions.CallTimeException(
"Call Time must be Greater than Wait Time as WhatsApp Web takes some Time to Load!"
)
sleep_time = left_time.seconds - wait_time
print(
f"In {sleep_time} Seconds WhatsApp will open and after {wait_time} Seconds Message will be Delivered!"
)
time.sleep(sleep_time)
core.send_message(message=message, receiver=group_id, wait_time=wait_time)
log.log_message(_time=current_time, receiver=group_id, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhatmsg_to_group_instantly(
group_id: str,
message: str,
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send WhatsApp Message to a Group Instantly"""
current_time = time.localtime()
time.sleep(sleep_time)
core.send_message(message=message, receiver=group_id, wait_time=wait_time)
log.log_message(_time=current_time, receiver=group_id, message=message)
if tab_close:
core.close_tab(wait_time=close_time)
def sendwhats_image(
receiver: str,
img_path: str,
caption: str = "",
wait_time: int = 15,
tab_close: bool = False,
close_time: int = 3,
) -> None:
"""Send Image to a WhatsApp Contact or Group at a Certain Time"""
if (not receiver.isalnum()) and (not core.check_number(number=receiver)):
raise exceptions.CountryCodeException("Country Code Missing in Phone Number!")
current_time = time.localtime()
core.send_image(
path=img_path, caption=caption, receiver=receiver, wait_time=wait_time
)
log.log_image(_time=current_time, path=img_path, receiver=receiver, caption=caption)
if tab_close:
core.close_tab(wait_time=close_time)
def open_web() -> bool:
"""Opens WhatsApp Web"""
try:
web.open("https://web.whatsapp.com")
except web.Error:
return False
else:
return True | 0.494141 | 0.091666 |
import discord
from discord.ext import commands
from discord import utils
import asyncio
import json
class Developers:
def __init__(self, bot):
self.bot = bot
@commands.group()
@commands.is_owner()
async def extension(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension <sub-command>**', value=f'Valid sub-commands: *reload*, *load*, *load*')
await ctx.send(embed=embed)
return
@extension.command(name='load')
@commands.is_owner()
async def load(self, ctx, *, extension: str = None):
"""Load a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension load <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.load_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not load {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send('**`SUCCESS`** Loaded extension: *{extension}*')
@extension.command(name='unload')
@commands.is_owner()
async def unload(self, ctx, *, extension: str = None):
"""Unload a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension unload <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.unload_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not unload {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send(f'**`SUCCESS`** Unloaded extension: *{extension}*')
@extension.command(name='reload')
@commands.is_owner()
async def reload(self, ctx, *, extension: str = None):
"""Reload a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension reload <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.unload_extension(f'extensions.' + extension)
self.bot.load_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not reload {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send(f'**`SUCCESS`** Reloaded extension: *{extension}*')
@commands.group(name="bot")
@commands.is_owner()
async def bots(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title=f':tools: Bot Tools', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}bot <sub-command>**', value=f'Valid sub-commands: *stop*')
await ctx.send(embed=embed)
return
@bots.command(name='stop', alias="restart")
@commands.is_owner()
async def stop(self, ctx):
"""Stops/Restarts the bot."""
await ctx.send(f'Stopping the bot!')
await self.bot.logout()
@bots.command(name="prefix")
async def prefix(self, ctx, newprefix: str = None):
if newprefix == None:
embed = discord.Embed(title=f':tools: Settings', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}bot prefix <newprefix>**', value=f'Sets the global backup prefix, please enter a valid prefix ex: **!**')
await ctx.send(embed=embed)
return
self.bot.config["GLOBAL"]["PREFIX"] = newprefix
with open('settings.json', 'w') as f:
json.dump(selg.bot.config, f, indent=2)
await ctx.send('Changed the global backup prefix to: **{}**'.format(newprefix))
return
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, discord.ext.commands.NoPrivateMessage):
await ctx.send(f':no_entry: This command can only be used inside a server!')
return
if isinstance(error, discord.ext.commands.NotOwner):
await ctx.send(f':no_entry: You do not have permission to use this command!')
return
if isinstance(error, discord.ext.commands.MissingPermissions):
await ctx.send(f':no_entry: You do not have permission to use this command!')
return
if isinstance(error, discord.ext.commands.BotMissingPermissions):
await ctx.send(":no_entry: The bot does not have sufficient permissions to run this command, please contact the guild owner about this!")
await ctx.guild.owner.send(f':no_entry: The bot does not have sufficient permissions to run command: {ctx.prefix}{ctx.name}, please make sure the bot has the following permissions: `Administrator` or the following: `Manage messages`, `Add reactions`, `Read messages`, `Send messages`, `Read message history`, `Embed links` and `Attach file`')
return
await ctx.send(f'**`ERROR:`** Error while running the command **{ctx.prefix}{ctx.command.qualified_name}**')
await ctx.send(f'=========================================')
await ctx.send(f'```{error}```')
def setup(bot):
bot.add_cog(Developers(bot)) | extensions/developer.py | import discord
from discord.ext import commands
from discord import utils
import asyncio
import json
class Developers:
def __init__(self, bot):
self.bot = bot
@commands.group()
@commands.is_owner()
async def extension(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension <sub-command>**', value=f'Valid sub-commands: *reload*, *load*, *load*')
await ctx.send(embed=embed)
return
@extension.command(name='load')
@commands.is_owner()
async def load(self, ctx, *, extension: str = None):
"""Load a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension load <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.load_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not load {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send('**`SUCCESS`** Loaded extension: *{extension}*')
@extension.command(name='unload')
@commands.is_owner()
async def unload(self, ctx, *, extension: str = None):
"""Unload a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension unload <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.unload_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not unload {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send(f'**`SUCCESS`** Unloaded extension: *{extension}*')
@extension.command(name='reload')
@commands.is_owner()
async def reload(self, ctx, *, extension: str = None):
"""Reload a extension!"""
if extension == None:
embed = discord.Embed(title=f':tools: Extensions', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}extension reload <extension>**', value=f'Please enter a valid extension ex: **suggest**')
await ctx.send(embed=embed)
return
try:
self.bot.unload_extension(f'extensions.' + extension)
self.bot.load_extension(f'extensions.' + extension)
except Exception as e:
await ctx.send(f'**`ERROR:`** Could not reload {extension}')
await ctx.send(f'=========================================')
await ctx.send(f'```{e}```')
else:
await ctx.send(f'**`SUCCESS`** Reloaded extension: *{extension}*')
@commands.group(name="bot")
@commands.is_owner()
async def bots(self, ctx):
if ctx.invoked_subcommand is None:
embed = discord.Embed(title=f':tools: Bot Tools', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}bot <sub-command>**', value=f'Valid sub-commands: *stop*')
await ctx.send(embed=embed)
return
@bots.command(name='stop', alias="restart")
@commands.is_owner()
async def stop(self, ctx):
"""Stops/Restarts the bot."""
await ctx.send(f'Stopping the bot!')
await self.bot.logout()
@bots.command(name="prefix")
async def prefix(self, ctx, newprefix: str = None):
if newprefix == None:
embed = discord.Embed(title=f':tools: Settings', color=0xffffff)
embed.add_field(name=f'Invalid syntax! **{ctx.prefix}bot prefix <newprefix>**', value=f'Sets the global backup prefix, please enter a valid prefix ex: **!**')
await ctx.send(embed=embed)
return
self.bot.config["GLOBAL"]["PREFIX"] = newprefix
with open('settings.json', 'w') as f:
json.dump(selg.bot.config, f, indent=2)
await ctx.send('Changed the global backup prefix to: **{}**'.format(newprefix))
return
async def on_command_error(self, ctx, error):
if isinstance(error, commands.CommandNotFound):
return
if isinstance(error, discord.ext.commands.NoPrivateMessage):
await ctx.send(f':no_entry: This command can only be used inside a server!')
return
if isinstance(error, discord.ext.commands.NotOwner):
await ctx.send(f':no_entry: You do not have permission to use this command!')
return
if isinstance(error, discord.ext.commands.MissingPermissions):
await ctx.send(f':no_entry: You do not have permission to use this command!')
return
if isinstance(error, discord.ext.commands.BotMissingPermissions):
await ctx.send(":no_entry: The bot does not have sufficient permissions to run this command, please contact the guild owner about this!")
await ctx.guild.owner.send(f':no_entry: The bot does not have sufficient permissions to run command: {ctx.prefix}{ctx.name}, please make sure the bot has the following permissions: `Administrator` or the following: `Manage messages`, `Add reactions`, `Read messages`, `Send messages`, `Read message history`, `Embed links` and `Attach file`')
return
await ctx.send(f'**`ERROR:`** Error while running the command **{ctx.prefix}{ctx.command.qualified_name}**')
await ctx.send(f'=========================================')
await ctx.send(f'```{error}```')
def setup(bot):
bot.add_cog(Developers(bot)) | 0.532911 | 0.17971 |
''' distribute- and pip-enabled setup.py '''
from __future__ import print_function
import sys
import logging
import os
import re
import shutil
# ----- overrides -----
# set these to anything but None to override the automatic defaults
packages = None
package_name = None
package_data = None
scripts = None
# ---------------------
# ----- control flags -----
# fallback to setuptools if distribute isn't found
setup_tools_fallback = True
# don't include subdir named 'tests' in package_data
skip_tests = False
# print some extra debugging info
debug = True
# -------------------------
if debug:
logging.basicConfig(level=logging.DEBUG)
# distribute import and testing
try:
import distribute_setup
distribute_setup.use_setuptools()
logging.debug("distribute_setup.py imported and used")
except ImportError:
# fallback to setuptools?
# distribute_setup.py was not in this directory
if not (setup_tools_fallback):
import setuptools
if not (hasattr(setuptools, '_distribute')
and setuptools._distribute):
raise ImportError("distribute was not found and fallback to "
"setuptools was not allowed")
else:
logging.debug("distribute_setup.py not found, defaulted to "
"system distribute")
else:
logging.debug("distribute_setup.py not found, defaulting to system "
"setuptools")
import setuptools
from setuptools.command.install import install as install_
def find_scripts():
return [s for s in setuptools.findall('bin/') if os.path.splitext(s)[1] != '.pyc']
def package_to_path(package):
"""
Convert a package (as found by setuptools.find_packages)
e.g. "foo.bar" to usable path
e.g. "foo/bar"
No idea if this works on windows
"""
return package.replace('.','/')
def find_subdirectories(package):
"""
Get the subdirectories within a package
This will include resources (non-submodules) and submodules
"""
try:
subdirectories = os.walk(package_to_path(package)).next()[1]
except StopIteration:
subdirectories = []
return subdirectories
def subdir_findall(dir, subdir):
"""
Find all files in a subdirectory and return paths relative to dir
This is similar to (and uses) setuptools.findall
However, the paths returned are in the form needed for package_data
"""
strip_n = len(dir.split('/'))
path = '/'.join((dir, subdir))
return ['/'.join(s.split('/')[strip_n:]) for s in setuptools.findall(path)]
def find_package_data(packages):
"""
For a list of packages, find the package_data
This function scans the subdirectories of a package and considers all
non-submodule subdirectories as resources, including them in
the package_data
Returns a dictionary suitable for setup(package_data=<result>)
"""
package_data = {}
for package in packages:
package_data[package] = []
for subdir in find_subdirectories(package):
if '.'.join((package, subdir)) in packages: # skip submodules
logging.debug("skipping submodule %s/%s" % (package, subdir))
continue
if skip_tests and (subdir == 'tests'): # skip tests
logging.debug("skipping tests %s/%s" % (package, subdir))
continue
package_data[package] += subdir_findall(package_to_path(package), subdir)
return package_data
def readme():
try:
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
return f.read()
except (IOError, OSError):
return ''
def get_version():
src_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'src')
sys.path = [src_path] + sys.path
from brainy.version import brainy_version
return brainy_version
# ----------- Override defaults here ----------------
scripts = [
'brainy',
'bin/brainy-daemon',
'bin/brainy-config',
'bin/brainy-frames',
'bin/brainy-project',
'bin/brainy-web',
]
# packages = [
# 'brainy',
# 'brainy.apps',
# 'brainy.pipes',
# 'brainy.process',
# 'brainy.scheduler',
# 'brainy.workflows',
# ]
package_data = {'': ['*.html', '*.svg', '*.js']}
if packages is None: packages = setuptools.find_packages('src')
if len(packages) == 0: raise Exception("No valid packages found")
if package_name is None: package_name = packages[0]
if package_data is None: package_data = find_package_data(packages)
if scripts is None: scripts = find_scripts()
class install(install_):
"""Customized setuptools install command - prints a friendly greeting."""
def run(self):
print("Hello, brainy user :)")
install_.run(self)
print("Post install..")
self.copy_package_data()
self.init_config()
@classmethod
def init_config(cls):
src_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'src')
sys.path = [src_path] + sys.path
from brainy.config import write_user_config
print('Initializing brainy config in user home.')
write_user_config()
@classmethod
def copy_package_data(cls, brainy_folder_path=None):
if brainy_folder_path is None:
brainy_folder_path = os.path.expanduser('~/.brainy')
if os.path.exists(brainy_folder_path):
print('Warning! brainy user folder already exists: %s.. ' %
brainy_folder_path +
'\nSkipping package data copying!' +
'Consider `rm -rf ~/.brainy/`?')
return
os.makedirs(brainy_folder_path)
PREFIX = os.path.dirname(__file__)
# Copy workflows.
for folder in ['empty', 'demo']:
source = os.path.join(PREFIX, 'src', 'brainy', 'workflows', folder)
dest = os.path.join(brainy_folder_path, 'workflows', folder)
logging.debug('Copying data %s -> %s' % (source, dest))
shutil.copytree(source, dest)
# Copy lib.
for folder in ['matlab', 'python']:
source = os.path.join(PREFIX, 'src', 'brainy', 'lib', folder)
dest = os.path.join(brainy_folder_path, 'lib', folder)
logging.debug('Copying data %s -> %s' % (source, dest))
shutil.copytree(source, dest)
# Copy ui/web.
source = os.path.join(PREFIX, 'ui')
dest = os.path.join(brainy_folder_path, 'ui')
logging.debug('Copying data %s -> %s' % (source, dest))
shutil.copytree(source, dest)
# Is it pip or conda?
if sys.argv[0] != 'setup.py':
def post_installi():
print('Outside of setup.py')
install.copy_package_data()
install.init_config()
import atexit
atexit.register(post_installi)
setuptools.setup(
name='brainy-mind',
version=get_version(),
description='brainy is a nimble workflow managing tool which is a part of '
'iBRAIN framework for scientific computation primarily '
'applied for BigData analysis in context of HPC and HTS',
long_description=readme(),
author='<NAME>',
author_email='<EMAIL>',
url='https://github.com/pelkmanslab/brainy',
license='MIT',
platforms=['Linux', 'OS-X'],
classifiers=[
'Topic :: System :: Distributed Computing',
'Topic :: Scientific/Engineering :: Image Recognition',
'Topic :: Scientific/Engineering :: Information Analysis',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: System :: Emulators',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Programming Language :: Python',
'Programming Language :: Unix Shell',
'Programming Language :: Ruby',
'Programming Language :: Java',
'Development Status :: 4 - Beta',
'Operating System :: POSIX :: Linux',
'Operating System :: MacOS',
],
scripts=scripts,
packages=packages,
package_dir={'': 'src'},
package_data=package_data,
include_package_data=True,
download_url='https://github.com/pelkmanslab/brainy/tarball/master',
setup_requires=[
'PyYAML>=3.11',
],
install_requires=[
'pipette>=0.1.8',
'tree_output>=0.1.4',
'sh>=1.09',
'PyYAML>=3.11',
'requests>=2.6.0',
'findtools>=1.0.3',
'Twisted>=14.0.2',
'DaemonCxt>=1.5.7',
],
cmdclass={
'install': install,
},
tests_require=['nose>=1.0'],
test_suite='nose.collector',
) | setup.py | ''' distribute- and pip-enabled setup.py '''
# Module prelude: stdlib imports, build flags, and the distribute/setuptools
# bootstrap dance.  Only comments are added here.
from __future__ import print_function
import sys
import logging
import os
import re
import shutil
# ----- overrides -----
# set these to anything but None to override the automatic defaults
packages = None
package_name = None
package_data = None
scripts = None
# ---------------------
# ----- control flags -----
# fallback to setuptools if distribute isn't found
setup_tools_fallback = True
# don't include subdir named 'tests' in package_data
skip_tests = False
# print some extra debugging info
debug = True
# -------------------------
if debug:
    logging.basicConfig(level=logging.DEBUG)
# distribute import and testing
try:
    import distribute_setup
    distribute_setup.use_setuptools()
    logging.debug("distribute_setup.py imported and used")
except ImportError:
    # fallback to setuptools?
    # distribute_setup.py was not in this directory
    if not (setup_tools_fallback):
        import setuptools
        if not (hasattr(setuptools, '_distribute')
                and setuptools._distribute):
            raise ImportError("distribute was not found and fallback to "
                              "setuptools was not allowed")
        else:
            logging.debug("distribute_setup.py not found, defaulted to "
                          "system distribute")
    else:
        logging.debug("distribute_setup.py not found, defaulting to system "
                      "setuptools")
# Imported unconditionally: by now either distribute or setuptools backs it.
import setuptools
# Base class for the customized `install` command defined further below.
from setuptools.command.install import install as install_
def find_scripts():
    """Collect every file under bin/, skipping compiled .pyc leftovers."""
    scripts = []
    for candidate in setuptools.findall('bin/'):
        _, extension = os.path.splitext(candidate)
        if extension != '.pyc':
            scripts.append(candidate)
    return scripts
def package_to_path(package):
    """
    Convert a dotted package name (as found by setuptools.find_packages),
    e.g. "foo.bar", to a usable path, e.g. "foo/bar".

    Uses '/' unconditionally; Windows behaviour is untested.
    """
    return '/'.join(package.split('.'))
def find_subdirectories(package):
    """
    Return the immediate subdirectories of *package*.

    The result includes both resource directories (non-submodules) and
    submodule directories.  An empty list is returned when the package
    directory does not exist (os.walk then yields nothing at all).
    """
    try:
        # Use the builtin next(): the original called the Python-2-only
        # generator method .next(), which raises AttributeError on Python 3.
        subdirectories = next(os.walk(package_to_path(package)))[1]
    except StopIteration:
        subdirectories = []
    return subdirectories
def subdir_findall(dir, subdir):
    """
    List every file below dir/subdir with paths made relative to *dir*.

    Thin wrapper around setuptools.findall that reshapes its output into
    the form expected by setup(package_data=...).
    """
    depth = len(dir.split('/'))
    found = setuptools.findall('/'.join((dir, subdir)))
    return ['/'.join(entry.split('/')[depth:]) for entry in found]
def find_package_data(packages):
    """
    Build a setup(package_data=...) mapping for *packages*.

    Every subdirectory of a package that is not itself a submodule is
    treated as a resource directory and its files are included.  When the
    module-level ``skip_tests`` flag is set, 'tests' subdirectories are
    excluded as well.
    """
    package_data = {}
    for package in packages:
        resources = []
        for subdir in find_subdirectories(package):
            if '.'.join((package, subdir)) in packages:
                # A nested package, not a resource directory.
                logging.debug("skipping submodule %s/%s" % (package, subdir))
                continue
            if skip_tests and (subdir == 'tests'):
                logging.debug("skipping tests %s/%s" % (package, subdir))
                continue
            resources += subdir_findall(package_to_path(package), subdir)
        package_data[package] = resources
    return package_data
def readme():
    """Return the contents of README.md, or '' when it cannot be read."""
    path = os.path.join(os.path.dirname(__file__), 'README.md')
    try:
        with open(path) as handle:
            return handle.read()
    except (IOError, OSError):
        return ''
def get_version():
    """Prepend src/ to sys.path and return brainy's version string."""
    here = os.path.abspath(os.path.dirname(__file__))
    sys.path = [os.path.join(here, 'src')] + sys.path
    from brainy.version import brainy_version
    return brainy_version
# ----------- Override defaults here ----------------
# Console scripts shipped with the package.  NOTE(review): the first entry
# is bare 'brainy' while all others live under bin/ -- confirm a top-level
# 'brainy' file actually exists.
scripts = [
    'brainy',
    'bin/brainy-daemon',
    'bin/brainy-config',
    'bin/brainy-frames',
    'bin/brainy-project',
    'bin/brainy-web',
]
# packages = [
#     'brainy',
#     'brainy.apps',
#     'brainy.pipes',
#     'brainy.process',
#     'brainy.scheduler',
#     'brainy.workflows',
# ]
# Resource file patterns included for every package.
package_data = {'': ['*.html', '*.svg', '*.js']}
# Fill in anything still unset from the src/ tree.  Note: package_data is
# assigned just above, so the find_package_data() fallback never runs here.
if packages is None: packages = setuptools.find_packages('src')
if len(packages) == 0: raise Exception("No valid packages found")
if package_name is None: package_name = packages[0]
if package_data is None: package_data = find_package_data(packages)
if scripts is None: scripts = find_scripts()
class install(install_):
    """Customized setuptools install command - prints a friendly greeting.

    After the regular install it also copies package data into the user's
    ~/.brainy folder and writes the user-level brainy configuration.
    """
    def run(self):
        # Standard install first, then one-time user-home setup.
        print("Hello, brainy user :)")
        install_.run(self)
        print("Post install..")
        self.copy_package_data()
        self.init_config()
    @classmethod
    def init_config(cls):
        """Write the default user configuration into the user's home."""
        # Make src/ importable so brainy.config can be loaded from the
        # source tree even before the package itself is installed.
        src_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                'src')
        sys.path = [src_path] + sys.path
        from brainy.config import write_user_config
        print('Initializing brainy config in user home.')
        write_user_config()
    @classmethod
    def copy_package_data(cls, brainy_folder_path=None):
        """Copy workflows, libs and the web UI into *brainy_folder_path*.

        Defaults to ~/.brainy.  When that folder already exists the copy
        is skipped entirely (with a warning) rather than overwritten.
        """
        if brainy_folder_path is None:
            brainy_folder_path = os.path.expanduser('~/.brainy')
        if os.path.exists(brainy_folder_path):
            print('Warning! brainy user folder already exists: %s.. ' %
                  brainy_folder_path +
                  '\nSkipping package data copying!' +
                  'Consider `rm -rf ~/.brainy/`?')
            return
        os.makedirs(brainy_folder_path)
        PREFIX = os.path.dirname(__file__)
        # Copy workflows.
        for folder in ['empty', 'demo']:
            source = os.path.join(PREFIX, 'src', 'brainy', 'workflows', folder)
            dest = os.path.join(brainy_folder_path, 'workflows', folder)
            logging.debug('Copying data %s -> %s' % (source, dest))
            shutil.copytree(source, dest)
        # Copy lib.
        for folder in ['matlab', 'python']:
            source = os.path.join(PREFIX, 'src', 'brainy', 'lib', folder)
            dest = os.path.join(brainy_folder_path, 'lib', folder)
            logging.debug('Copying data %s -> %s' % (source, dest))
            shutil.copytree(source, dest)
        # Copy ui/web.
        source = os.path.join(PREFIX, 'ui')
        dest = os.path.join(brainy_folder_path, 'ui')
        logging.debug('Copying data %s -> %s' % (source, dest))
        shutil.copytree(source, dest)
# Is it pip or conda?
# When not invoked as `python setup.py ...` (e.g. under pip/conda) the
# custom install command may be bypassed, so the post-install steps are
# scheduled to run at interpreter exit instead.
if sys.argv[0] != 'setup.py':
    def post_installi():
        # NOTE(review): name looks like a typo for post_install; local only.
        print('Outside of setup.py')
        install.copy_package_data()
        install.init_config()
    import atexit
    atexit.register(post_installi)
# Package metadata and dependency declarations.
setuptools.setup(
    name='brainy-mind',
    version=get_version(),
    description='brainy is a nimble workflow managing tool which is a part of '
                'iBRAIN framework for scientific computation primarily '
                'applied for BigData analysis in context of HPC and HTS',
    long_description=readme(),
    author='<NAME>',
    author_email='<EMAIL>',
    url='https://github.com/pelkmanslab/brainy',
    license='MIT',
    platforms=['Linux', 'OS-X'],
    classifiers=[
        'Topic :: System :: Distributed Computing',
        'Topic :: Scientific/Engineering :: Image Recognition',
        'Topic :: Scientific/Engineering :: Information Analysis',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: System :: Emulators',
        'Intended Audience :: Developers',
        'Intended Audience :: System Administrators',
        'Programming Language :: Python',
        'Programming Language :: Unix Shell',
        'Programming Language :: Ruby',
        'Programming Language :: Java',
        'Development Status :: 4 - Beta',
        'Operating System :: POSIX :: Linux',
        'Operating System :: MacOS',
    ],
    scripts=scripts,
    packages=packages,
    # Packages live under src/ (matches find_packages('src') above).
    package_dir={'': 'src'},
    package_data=package_data,
    include_package_data=True,
    download_url='https://github.com/pelkmanslab/brainy/tarball/master',
    setup_requires=[
        'PyYAML>=3.11',
    ],
    install_requires=[
        'pipette>=0.1.8',
        'tree_output>=0.1.4',
        'sh>=1.09',
        'PyYAML>=3.11',
        'requests>=2.6.0',
        'findtools>=1.0.3',
        'Twisted>=14.0.2',
        'DaemonCxt>=1.5.7',
    ],
    # Hook in the customized install command defined above.
    cmdclass={
        'install': install,
    },
    tests_require=['nose>=1.0'],
    test_suite='nose.collector',
) | 0.336113 | 0.098729 |
from __future__ import print_function
import datetime
import os
import sys
import pygrib
import pytz
from pyiem.datatypes import distance
from pyiem.plot import MapPlot
import matplotlib.cm as cm
def do(ts, hours):
    """
    Create a plot of precipitation stage4 estimates for some day.

    Sums NCEP Stage IV hourly precip grids over the *hours* window ending
    at *ts* (timezone-aware; truncated to the top of the hour) and renders
    Iowa / Midwest / CONUS maps queued via the pqstr string.
    """
    ts = ts.replace(minute=0)
    sts = ts - datetime.timedelta(hours=hours)
    ets = ts
    interval = datetime.timedelta(hours=1)
    now = sts
    total = None  # running grid sum; lat/lon captured from the first grid
    lts = None  # timestamp of the most recent file actually found
    while now < ets:
        fn = ("/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.01h.grib"
              ) % (now.strftime("%Y/%m/%d"), now.strftime("%Y%m%d%H"))
        if os.path.isfile(fn):
            lts = now
            grbs = pygrib.open(fn)
            if total is None:
                g = grbs[1]
                # .filled(0) converts masked (missing) cells to zero.
                total = g["values"].filled(0)
                lats, lons = g.latlons()
            else:
                total += grbs[1]["values"].filled(0)
            grbs.close()
        now += interval
    if lts is None and ts.hour > 1:
        # Only complain after 01z; presumably keeps the daily archive
        # rollover quiet -- TODO confirm.
        print('Missing StageIV data!')
    if lts is None:
        return
    cmap = cm.get_cmap("jet")
    cmap.set_under('white')
    cmap.set_over('black')
    # Contour levels in inches (values are converted MM -> IN below).
    clevs = [0.01, 0.1, 0.25, 0.5, 1, 2, 3, 5, 8, 9.9]
    localtime = (ts - datetime.timedelta(minutes=1)).astimezone(
        pytz.timezone("America/Chicago"))
    for sector in ['iowa', 'midwest', 'conus']:
        mp = MapPlot(sector=sector,
                     title='NCEP Stage IV %s Hour Precipitation' % (hours,),
                     subtitle='Total up to %s' % (
                         localtime.strftime("%d %B %Y %I %p %Z"),))
        mp.pcolormesh(lons, lats, distance(total, 'MM').value('IN'), clevs,
                      units='inch')
        pqstr = "plot %s %s00 %s_stage4_%sh.png %s_stage4_%sh_%s.png png" % (
            'ac', ts.strftime("%Y%m%d%H"), sector, hours,
            sector, hours, ts.strftime("%H"))
        if sector == 'iowa':
            mp.drawcounties()
        mp.postprocess(pqstr=pqstr)
        mp.close()
def main(argv):
    """Go Main Go.

    Usage: prog <year> <month> <day> <hour> <hours-window>
       or: prog <hours-window>              (uses the current UTC time)
    """
    if len(argv) == 6:
        # argv[0] is the program name, so an explicit timestamp plus the
        # window length yields six items.  The old ``len(argv) == 4`` test
        # could never satisfy its own argv[4]/argv[5] accesses and raised
        # IndexError whenever it matched.
        ts = datetime.datetime(int(argv[1]), int(argv[2]),
                               int(argv[3]), int(argv[4]))
        hr = int(argv[5])
    else:
        ts = datetime.datetime.utcnow()
        # Read from the argv that was passed in, not the sys.argv global,
        # so the function stays callable with a custom argument list.
        hr = int(argv[1])
    ts = ts.replace(tzinfo=pytz.utc)
    do(ts, hr)
if __name__ == "__main__":
main(sys.argv) | scripts/current/stage4_xhour.py | from __future__ import print_function
import datetime
import os
import sys
import pygrib
import pytz
from pyiem.datatypes import distance
from pyiem.plot import MapPlot
import matplotlib.cm as cm
def do(ts, hours):
    """
    Create a plot of precipitation stage4 estimates for some day.

    Sums NCEP Stage IV hourly precip grids over the *hours* window ending
    at *ts* (timezone-aware; truncated to the top of the hour) and renders
    Iowa / Midwest / CONUS maps queued via the pqstr string.
    """
    ts = ts.replace(minute=0)
    sts = ts - datetime.timedelta(hours=hours)
    ets = ts
    interval = datetime.timedelta(hours=1)
    now = sts
    total = None  # running grid sum; lat/lon captured from the first grid
    lts = None  # timestamp of the most recent file actually found
    while now < ets:
        fn = ("/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.01h.grib"
              ) % (now.strftime("%Y/%m/%d"), now.strftime("%Y%m%d%H"))
        if os.path.isfile(fn):
            lts = now
            grbs = pygrib.open(fn)
            if total is None:
                g = grbs[1]
                # .filled(0) converts masked (missing) cells to zero.
                total = g["values"].filled(0)
                lats, lons = g.latlons()
            else:
                total += grbs[1]["values"].filled(0)
            grbs.close()
        now += interval
    if lts is None and ts.hour > 1:
        # Only complain after 01z; presumably keeps the daily archive
        # rollover quiet -- TODO confirm.
        print('Missing StageIV data!')
    if lts is None:
        return
    cmap = cm.get_cmap("jet")
    cmap.set_under('white')
    cmap.set_over('black')
    # Contour levels in inches (values are converted MM -> IN below).
    clevs = [0.01, 0.1, 0.25, 0.5, 1, 2, 3, 5, 8, 9.9]
    localtime = (ts - datetime.timedelta(minutes=1)).astimezone(
        pytz.timezone("America/Chicago"))
    for sector in ['iowa', 'midwest', 'conus']:
        mp = MapPlot(sector=sector,
                     title='NCEP Stage IV %s Hour Precipitation' % (hours,),
                     subtitle='Total up to %s' % (
                         localtime.strftime("%d %B %Y %I %p %Z"),))
        mp.pcolormesh(lons, lats, distance(total, 'MM').value('IN'), clevs,
                      units='inch')
        pqstr = "plot %s %s00 %s_stage4_%sh.png %s_stage4_%sh_%s.png png" % (
            'ac', ts.strftime("%Y%m%d%H"), sector, hours,
            sector, hours, ts.strftime("%H"))
        if sector == 'iowa':
            mp.drawcounties()
        mp.postprocess(pqstr=pqstr)
        mp.close()
def main(argv):
    """Go Main Go.

    Usage: prog <year> <month> <day> <hour> <hours-window>
       or: prog <hours-window>              (uses the current UTC time)
    """
    if len(argv) == 6:
        # argv[0] is the program name, so an explicit timestamp plus the
        # window length yields six items.  The old ``len(argv) == 4`` test
        # could never satisfy its own argv[4]/argv[5] accesses and raised
        # IndexError whenever it matched.
        ts = datetime.datetime(int(argv[1]), int(argv[2]),
                               int(argv[3]), int(argv[4]))
        hr = int(argv[5])
    else:
        ts = datetime.datetime.utcnow()
        # Read from the argv that was passed in, not the sys.argv global,
        # so the function stays callable with a custom argument list.
        hr = int(argv[1])
    ts = ts.replace(tzinfo=pytz.utc)
    do(ts, hr)
if __name__ == "__main__":
main(sys.argv) | 0.398289 | 0.221877 |
from __future__ import division
import numpy as np
from .J_table import J_table
import sys
from time import time
from numpy import log, sqrt, exp, pi
from scipy.signal import fftconvolve as convolve
def P_IA_deltaE2(k,P):
    """Compute the deltaE2 intrinsic-alignment term on the grid *k*.

    Parameters
    ----------
    k : 1-D array; assumed logarithmically spaced -- TODO confirm, since
        the spacing dL is inferred from the first two samples only.
    P : power spectrum sampled at *k*.

    Returns
    -------
    Array of the same length as *k*.
    """
    N=k.size
    n= np.arange(-N+1,N )
    # Logarithmic grid spacing; only valid when k is exactly log-spaced.
    dL=log(k[1])-log(k[0])
    s=n*dL
    # Beyond |s| > cut the closed-form kernel loses accuracy, so series
    # expansions (Z1_high / Z1_low) are substituted there.
    cut=3
    high_s=s[s > cut]
    low_s=s[s < -cut]
    mid_high_s=s[ (s <= cut) & (s > 0)]
    mid_low_s=s[ (s >= -cut) & (s < 0)]
    # For Zbar
    # Closed-form kernel in r = exp(-s); singular at r = 1 (i.e. s = 0).
    Z1=lambda r : 30. + 146*r**2 - 110*r**4 + 30*r**6 + log(np.absolute(r-1.)/(r+1.))*(15./r - 60.*r + 90*r**3 - 60*r**5 + 15*r**7)
    # Large-r and small-r series expansions of the same kernel.
    Z1_high=lambda r : r**2 * (4/7 - 4/7 * r**2 + 12/49 * r**4 - 4/147 * r**6 - 4/1617 * r**8 - 4/7007 * r**10 - 4/21021 * r**12 - 4/51051 * r**14)
    Z1_low=lambda r: r**2 * (-4/(21021 * r**10) - 4/(7007 * r**8) - 4/(1617 * r**6) - 4/(147 * r**4) + 12/(49 * r**2))
    f_mid_low=Z1(exp(-mid_low_s))*exp(-mid_low_s)
    f_mid_high=Z1(exp(-mid_high_s))*exp(-mid_high_s)
    f_high = Z1_high(exp(-high_s))*exp(-high_s)
    f_low = Z1_low(exp(-low_s))*exp(-low_s)
    # The scalar 96. fills the s = 0 sample that every slice above excludes
    # (presumably the finite r -> 1 limit of the kernel -- TODO confirm).
    f=np.hstack((f_low,f_mid_low,96.,f_mid_high,f_high))
    # print(f)
    # Linear convolution of P with the kernel; the central N samples of the
    # length 3N-2 result line up with the original k grid.
    g= convolve(P, f) * dL
    g_k=g[N-1:2*N-1]
    deltaE2= k**3/(896.*pi**2) * P*g_k
    return deltaE2
def IA_deltaE1():
    r"""Return the stacked J_table rows for the deltaE1 term.

    Column ordering of each coefficient row is
    \alpha, \beta, l_1, l_2, l, A coefficient.
    """
    coefficient_rows = np.array([[0, 0, 0, 2, 0, 17. / 21],
                                 [0, 0, 0, 2, 2, 4. / 21],
                                 [1, -1, 0, 2, 1, 1. / 2],
                                 [-1, 1, 0, 2, 1, 1. / 2]], dtype=float)
    return np.vstack([J_table(row) for row in coefficient_rows])
def IA_0E0E():
    r"""Return the stacked J_table rows for the 0E-0E spectrum.

    Column ordering of each coefficient row is
    \alpha, \beta, l_1, l_2, l, A coefficient.
    """
    coefficient_rows = np.array([[0, 0, 0, 0, 0, 29. / 90],
                                 [0, 0, 2, 0, 0, 5. / 63],
                                 [0, 0, 2, 2, 0, 19. / 18],
                                 [0, 0, 0, 4, 0, 19. / 35]], dtype=float)
    return np.vstack([J_table(row) for row in coefficient_rows])
def IA_0B0B():
# Ordering is \alpha, \beta, l_1, l_2, l, A coeficient
l_mat_0B0B= np.array([[0,0,0,0,0,2./45],\
[0,0,2,0,0,-44./63],\
[0,0,2,2,0,-8./9],\
[0,0,0,4,0,-16./35],\
[0,0,1,1,1,2.]], dtype=float)
table=np.zeros(10,dtype=float)
for i in range(l_mat_0B0B.shape[0]):
x=J_table(l_mat_0B0B[i])
table=np.row_stack((table,x))
return table[1:,:] | fastpt/IA_ta.py | from __future__ import division
import numpy as np
from .J_table import J_table
import sys
from time import time
from numpy import log, sqrt, exp, pi
from scipy.signal import fftconvolve as convolve
def P_IA_deltaE2(k,P):
    """Compute the deltaE2 intrinsic-alignment term on the grid *k*.

    Parameters
    ----------
    k : 1-D array; assumed logarithmically spaced -- TODO confirm, since
        the spacing dL is inferred from the first two samples only.
    P : power spectrum sampled at *k*.

    Returns
    -------
    Array of the same length as *k*.
    """
    N=k.size
    n= np.arange(-N+1,N )
    # Logarithmic grid spacing; only valid when k is exactly log-spaced.
    dL=log(k[1])-log(k[0])
    s=n*dL
    # Beyond |s| > cut the closed-form kernel loses accuracy, so series
    # expansions (Z1_high / Z1_low) are substituted there.
    cut=3
    high_s=s[s > cut]
    low_s=s[s < -cut]
    mid_high_s=s[ (s <= cut) & (s > 0)]
    mid_low_s=s[ (s >= -cut) & (s < 0)]
    # For Zbar
    # Closed-form kernel in r = exp(-s); singular at r = 1 (i.e. s = 0).
    Z1=lambda r : 30. + 146*r**2 - 110*r**4 + 30*r**6 + log(np.absolute(r-1.)/(r+1.))*(15./r - 60.*r + 90*r**3 - 60*r**5 + 15*r**7)
    # Large-r and small-r series expansions of the same kernel.
    Z1_high=lambda r : r**2 * (4/7 - 4/7 * r**2 + 12/49 * r**4 - 4/147 * r**6 - 4/1617 * r**8 - 4/7007 * r**10 - 4/21021 * r**12 - 4/51051 * r**14)
    Z1_low=lambda r: r**2 * (-4/(21021 * r**10) - 4/(7007 * r**8) - 4/(1617 * r**6) - 4/(147 * r**4) + 12/(49 * r**2))
    f_mid_low=Z1(exp(-mid_low_s))*exp(-mid_low_s)
    f_mid_high=Z1(exp(-mid_high_s))*exp(-mid_high_s)
    f_high = Z1_high(exp(-high_s))*exp(-high_s)
    f_low = Z1_low(exp(-low_s))*exp(-low_s)
    # The scalar 96. fills the s = 0 sample that every slice above excludes
    # (presumably the finite r -> 1 limit of the kernel -- TODO confirm).
    f=np.hstack((f_low,f_mid_low,96.,f_mid_high,f_high))
    # print(f)
    # Linear convolution of P with the kernel; the central N samples of the
    # length 3N-2 result line up with the original k grid.
    g= convolve(P, f) * dL
    g_k=g[N-1:2*N-1]
    deltaE2= k**3/(896.*pi**2) * P*g_k
    return deltaE2
def IA_deltaE1():
    r"""Return the stacked J_table rows for the deltaE1 term.

    Column ordering of each coefficient row is
    \alpha, \beta, l_1, l_2, l, A coefficient.
    """
    coefficient_rows = np.array([[0, 0, 0, 2, 0, 17. / 21],
                                 [0, 0, 0, 2, 2, 4. / 21],
                                 [1, -1, 0, 2, 1, 1. / 2],
                                 [-1, 1, 0, 2, 1, 1. / 2]], dtype=float)
    return np.vstack([J_table(row) for row in coefficient_rows])
def IA_0E0E():
    r"""Return the stacked J_table rows for the 0E-0E spectrum.

    Column ordering of each coefficient row is
    \alpha, \beta, l_1, l_2, l, A coefficient.
    """
    coefficient_rows = np.array([[0, 0, 0, 0, 0, 29. / 90],
                                 [0, 0, 2, 0, 0, 5. / 63],
                                 [0, 0, 2, 2, 0, 19. / 18],
                                 [0, 0, 0, 4, 0, 19. / 35]], dtype=float)
    return np.vstack([J_table(row) for row in coefficient_rows])
def IA_0B0B():
# Ordering is \alpha, \beta, l_1, l_2, l, A coeficient
l_mat_0B0B= np.array([[0,0,0,0,0,2./45],\
[0,0,2,0,0,-44./63],\
[0,0,2,2,0,-8./9],\
[0,0,0,4,0,-16./35],\
[0,0,1,1,1,2.]], dtype=float)
table=np.zeros(10,dtype=float)
for i in range(l_mat_0B0B.shape[0]):
x=J_table(l_mat_0B0B[i])
table=np.row_stack((table,x))
return table[1:,:] | 0.16099 | 0.226431 |
from fractions import Fraction
from wick.expression import AExpression
from wick.wick import apply_wick
from wick.convenience import one_e, two_e, E1, E2, commute
from wick.convenience import ketE1, ketE2, ketEip1, ketEea1
from wick.convenience import ketEea2, ketEip2, ketEdea1, ketEdip1
from wick.convenience import braE1, braEip1, braEea1
from wick.convenience import braEea2, braEip2, braEdea1, braEdip1
# Build Hbar as the truncated commutator (BCH-style) expansion of the
# normal-ordered Hamiltonian H with the cluster operator T = T1 + T2, then
# apply Wick's theorem to print each projected block of the effective
# Hamiltonian (one-body F blocks, then two-body W blocks).
H1 = one_e("f", ["occ", "vir"], norder=True)
H2 = two_e("I", ["occ", "vir"], norder=True, compress=True)
H = H1 + H2
# Cluster operator: single and double occupied -> virtual excitations.
T1 = E1("t", ["occ"], ["vir"])
T2 = E2("t", ["occ"], ["vir"])
T = T1 + T2
# Nested commutators feeding the expansion below.
HT = commute(H, T)
HTT = commute(HT, T)
HTTT = commute(HTT, T)
HTTTT = commute(HTTT, T)
# Expansion through the triple commutator; the quadruple-commutator term
# is added further down, before the two-body blocks.
Hbar = H + HT + Fraction('1/2')*HTT + Fraction('1/6')*HTTT
# ov piece
ket = ketE1("occ", "vir")
S = Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("F_{ov} = ")
print(final)
# vv piece
ket = ketEea1("vir")
bra = braEea1("vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("F_{vv} = ")
print(final)
# oo piece
ket = ketEip1("occ")
bra = braEip1("occ")
# NOTE(review): the -1 factor mirrors the other ip-projected blocks below.
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((1, 0))
print("F_{oo} = ")
print(final)
# Two-body blocks need the quadruple-commutator term as well.
Hbar += Fraction('1/24')*HTTTT
# vvoo piece
ket = ketE2("occ", "vir", "occ", "vir")
S = Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{oovv} = ")
print(final)
# vovv piece
ket = ketEea2("occ", "vir", "vir")
bra = braEea1("vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{vovv} = ")
print(final)
# ooov piece
ket = ketEip2("occ", "occ", "vir")
bra = braEip1("occ")
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((1, 2, 0, 3))
print("W_{ooov} = ")
print(final)
# vvvv piece
ket = ketEdea1("vir", "vir")
bra = braEdea1("vir", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{vvvv} = ")
print(final)
# oooo piece
ket = ketEdip1("occ", "occ")
bra = braEdip1("occ", "occ")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((2, 3, 0, 1))
print("W_{oooo} = ")
print(final)
# voov piece
ket = ketE1("occ", "vir")
bra = braE1("occ", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((0, 2, 1, 3))
print("W_{voov} = ")
print(final)
# vvvo piece
ket = ketEea1("vir")
bra = braEea2("occ", "vir", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((0, 1, 3, 2))
print("W_{vvvo} = ")
print(final)
# ovoo piece
ket = ketEip1("occ")
bra = braEip2("occ", "occ", "vir")
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((3, 0, 1, 2))
print("W_{ovoo} = ")
print(final) | examples/ccsd_Heff.py | from fractions import Fraction
from wick.expression import AExpression
from wick.wick import apply_wick
from wick.convenience import one_e, two_e, E1, E2, commute
from wick.convenience import ketE1, ketE2, ketEip1, ketEea1
from wick.convenience import ketEea2, ketEip2, ketEdea1, ketEdip1
from wick.convenience import braE1, braEip1, braEea1
from wick.convenience import braEea2, braEip2, braEdea1, braEdip1
# Build Hbar as the truncated commutator (BCH-style) expansion of the
# normal-ordered Hamiltonian H with the cluster operator T = T1 + T2, then
# apply Wick's theorem to print each projected block of the effective
# Hamiltonian (one-body F blocks, then two-body W blocks).
H1 = one_e("f", ["occ", "vir"], norder=True)
H2 = two_e("I", ["occ", "vir"], norder=True, compress=True)
H = H1 + H2
# Cluster operator: single and double occupied -> virtual excitations.
T1 = E1("t", ["occ"], ["vir"])
T2 = E2("t", ["occ"], ["vir"])
T = T1 + T2
# Nested commutators feeding the expansion below.
HT = commute(H, T)
HTT = commute(HT, T)
HTTT = commute(HTT, T)
HTTTT = commute(HTTT, T)
# Expansion through the triple commutator; the quadruple-commutator term
# is added further down, before the two-body blocks.
Hbar = H + HT + Fraction('1/2')*HTT + Fraction('1/6')*HTTT
# ov piece
ket = ketE1("occ", "vir")
S = Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("F_{ov} = ")
print(final)
# vv piece
ket = ketEea1("vir")
bra = braEea1("vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("F_{vv} = ")
print(final)
# oo piece
ket = ketEip1("occ")
bra = braEip1("occ")
# NOTE(review): the -1 factor mirrors the other ip-projected blocks below.
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((1, 0))
print("F_{oo} = ")
print(final)
# Two-body blocks need the quadruple-commutator term as well.
Hbar += Fraction('1/24')*HTTTT
# vvoo piece
ket = ketE2("occ", "vir", "occ", "vir")
S = Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{oovv} = ")
print(final)
# vovv piece
ket = ketEea2("occ", "vir", "vir")
bra = braEea1("vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{vovv} = ")
print(final)
# ooov piece
ket = ketEip2("occ", "occ", "vir")
bra = braEip1("occ")
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((1, 2, 0, 3))
print("W_{ooov} = ")
print(final)
# vvvv piece
ket = ketEdea1("vir", "vir")
bra = braEdea1("vir", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
print("W_{vvvv} = ")
print(final)
# oooo piece
ket = ketEdip1("occ", "occ")
bra = braEdip1("occ", "occ")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((2, 3, 0, 1))
print("W_{oooo} = ")
print(final)
# voov piece
ket = ketE1("occ", "vir")
bra = braE1("occ", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((0, 2, 1, 3))
print("W_{voov} = ")
print(final)
# vvvo piece
ket = ketEea1("vir")
bra = braEea2("occ", "vir", "vir")
S = bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((0, 1, 3, 2))
print("W_{vvvo} = ")
print(final)
# ovoo piece
ket = ketEip1("occ")
bra = braEip2("occ", "occ", "vir")
S = -1*bra*Hbar*ket
out = apply_wick(S)
out.resolve()
final = AExpression(Ex=out)
final.sort_tensors()
final = final.get_connected()
final.transpose((3, 0, 1, 2))
print("W_{ovoo} = ")
print(final) | 0.291989 | 0.333992 |
import shlex
import subprocess
import doit # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import types
from buildchain import utils
def task__vagrantkey() -> types.TaskDict:
    """Generate a SSH key pair in the .vagrant folder."""
    def mkdir_dot_vagrant() -> None:
        # The key pair lives under .vagrant/, so ensure the folder exists.
        constants.VAGRANT_ROOT.mkdir(exist_ok=True)
    # 4096-bit RSA key, empty passphrase, comment "doit".
    keygen = [
        "ssh-keygen",
        "-t", "rsa",
        "-b", "4096",
        "-N", "",
        "-f", str(constants.VAGRANT_SSH_KEY_PAIR),
        "-C", "doit",
    ]
    task: types.TaskDict = {}
    task["actions"] = [mkdir_dot_vagrant, keygen]
    task["targets"] = [constants.VAGRANT_SSH_KEY_PAIR]
    task["uptodate"] = [True]
    return task
def task__vagrant_up_noprov() -> types.TaskDict:
    """Run `vagrant up` without provisioning a development environment."""
    # Shell-quote user-supplied arguments and append --no-provision.
    extra = [shlex.quote(arg) for arg in config.VAGRANT_UP_ARGS]
    cmd = [config.ExtCommand.VAGRANT.value, "up"] + extra + ["--no-provision"]
    return {
        "actions": [doit.tools.LongRunning(" ".join(cmd))],
        "file_dep": [],
        "task_dep": ["check_for:vagrant"],
        "uptodate": [False],
    }
def task__vagrant_snapshot() -> types.TaskDict:
    """Snapshot development environment."""
    def check_snapshot_existence() -> bool:
        # Up to date iff a snapshot with the expected name already exists.
        listing = subprocess.check_output(
            [config.ExtCommand.VAGRANT.value, "snapshot", "list"]
        )
        return config.VAGRANT_SNAPSHOT_NAME in listing.decode("utf-8")
    vagrant = config.ExtCommand.VAGRANT.value
    # Halt the VM first so the snapshot is taken from a stopped state.
    halt_cmd = [vagrant, "halt", "bootstrap"]
    snapshot_cmd = [
        vagrant,
        "snapshot",
        "save",
        "--force",
        "bootstrap",
        config.VAGRANT_SNAPSHOT_NAME,
    ]
    return {
        "actions": [halt_cmd, snapshot_cmd],
        "task_dep": ["_vagrant_up_noprov", "check_for:vagrant"],
        "uptodate": [check_snapshot_existence],
        "verbosity": 2,
    }
def task_vagrant_restore() -> types.TaskDict:
    """Restore development environment snapshot."""
    restore_cmd = [
        config.ExtCommand.VAGRANT.value,
        "snapshot",
        "restore",
        "bootstrap",
        config.VAGRANT_SNAPSHOT_NAME,
    ]
    return {
        "actions": [restore_cmd],
        "uptodate": [False],
        "verbosity": 2,
        "task_dep": ["check_for:vagrant"],
    }
def task_vagrant_up() -> types.TaskDict:
    """Run `vagrant up` to (re-)provision a development environment."""
    vagrant = config.ExtCommand.VAGRANT.value
    up_cmd = [vagrant, "up"] + [shlex.quote(a) for a in config.VAGRANT_UP_ARGS]
    destroy_cmd = [vagrant, "destroy", "--force"]
    return {
        "actions": [doit.tools.LongRunning(" ".join(up_cmd))],
        "file_dep": [constants.VAGRANT_SSH_KEY_PAIR],
        "task_dep": ["check_for:vagrant", "populate_iso", "_vagrant_snapshot"],
        "uptodate": [False],
        "clean": [destroy_cmd],
        "verbosity": 2,
    }
# Export only the doit task callables from this module.
__all__ = utils.export_only_tasks(__name__)
import subprocess
import doit # type: ignore
from buildchain import config
from buildchain import constants
from buildchain import types
from buildchain import utils
def task__vagrantkey() -> types.TaskDict:
    """Generate a SSH key pair in the .vagrant folder."""
    def mkdir_dot_vagrant() -> None:
        # The key pair lives under .vagrant/, so ensure the folder exists.
        constants.VAGRANT_ROOT.mkdir(exist_ok=True)
    # 4096-bit RSA key, empty passphrase, comment "doit".
    keygen = [
        "ssh-keygen",
        "-t", "rsa",
        "-b", "4096",
        "-N", "",
        "-f", str(constants.VAGRANT_SSH_KEY_PAIR),
        "-C", "doit",
    ]
    task: types.TaskDict = {}
    task["actions"] = [mkdir_dot_vagrant, keygen]
    task["targets"] = [constants.VAGRANT_SSH_KEY_PAIR]
    task["uptodate"] = [True]
    return task
def task__vagrant_up_noprov() -> types.TaskDict:
    """Run `vagrant up` without provisioning a development environment."""
    # Shell-quote user-supplied arguments and append --no-provision.
    extra = [shlex.quote(arg) for arg in config.VAGRANT_UP_ARGS]
    cmd = [config.ExtCommand.VAGRANT.value, "up"] + extra + ["--no-provision"]
    return {
        "actions": [doit.tools.LongRunning(" ".join(cmd))],
        "file_dep": [],
        "task_dep": ["check_for:vagrant"],
        "uptodate": [False],
    }
def task__vagrant_snapshot() -> types.TaskDict:
    """Snapshot development environment."""
    def check_snapshot_existence() -> bool:
        # Up to date iff a snapshot with the expected name already exists.
        listing = subprocess.check_output(
            [config.ExtCommand.VAGRANT.value, "snapshot", "list"]
        )
        return config.VAGRANT_SNAPSHOT_NAME in listing.decode("utf-8")
    vagrant = config.ExtCommand.VAGRANT.value
    # Halt the VM first so the snapshot is taken from a stopped state.
    halt_cmd = [vagrant, "halt", "bootstrap"]
    snapshot_cmd = [
        vagrant,
        "snapshot",
        "save",
        "--force",
        "bootstrap",
        config.VAGRANT_SNAPSHOT_NAME,
    ]
    return {
        "actions": [halt_cmd, snapshot_cmd],
        "task_dep": ["_vagrant_up_noprov", "check_for:vagrant"],
        "uptodate": [check_snapshot_existence],
        "verbosity": 2,
    }
def task_vagrant_restore() -> types.TaskDict:
    """Restore development environment snapshot."""
    restore_cmd = [
        config.ExtCommand.VAGRANT.value,
        "snapshot",
        "restore",
        "bootstrap",
        config.VAGRANT_SNAPSHOT_NAME,
    ]
    return {
        "actions": [restore_cmd],
        "uptodate": [False],
        "verbosity": 2,
        "task_dep": ["check_for:vagrant"],
    }
def task_vagrant_up() -> types.TaskDict:
    """Run `vagrant up` to (re-)provision a development environment."""
    vagrant = config.ExtCommand.VAGRANT.value
    up_cmd = [vagrant, "up"] + [shlex.quote(a) for a in config.VAGRANT_UP_ARGS]
    destroy_cmd = [vagrant, "destroy", "--force"]
    return {
        "actions": [doit.tools.LongRunning(" ".join(up_cmd))],
        "file_dep": [constants.VAGRANT_SSH_KEY_PAIR],
        "task_dep": ["check_for:vagrant", "populate_iso", "_vagrant_snapshot"],
        "uptodate": [False],
        "clean": [destroy_cmd],
        "verbosity": 2,
    }
__all__ = utils.export_only_tasks(__name__) | 0.535827 | 0.31871 |
import numpy as np
from astropy.units import Quantity, Unit
from astropy.time import Time
from astropy.io.votable.converters import (
get_converter as get_votable_converter)
from .exceptions import DALServiceError
NUMERIC_DATATYPES = {'short', 'int', 'long', 'float', 'double'}
def find_param_by_keyword(keyword, params):
    """
    Look up *keyword* in the *params* mapping.

    The keyword is tried verbatim first, then in its upper-cased form,
    before giving up with a KeyError (whose message carries the
    upper-cased keyword, matching the historical behaviour).
    """
    if keyword in params:
        return params[keyword]
    upper = keyword.upper()
    if upper in params:
        return params[upper]
    raise KeyError('No param named {} defined'.format(upper))
registry = dict()
def xtype(name):
    """Class decorator registering a converter class under xtype *name*."""
    def register(cls):
        registry[name] = cls
        return cls
    return register
def unify_value(func):
    """
    Decorator for serialize method to do unit conversion on input value.

    The decorator converts the input value to the unit in the input param.
    A dimensionless input is tagged with the param's unit as-is; an input
    that already carries a unit is converted to the param's unit.  The
    plain numeric value (Quantity unwrapped) is what reaches *func*.
    """
    def wrapper(self, value):
        if self._param.unit:
            value = Quantity(value)
            # An empty unit string means the input was dimensionless, so
            # attach the param's unit instead of converting.
            if not value.unit.to_string():
                value = value * Unit(self._param.unit)
            else:
                value = value.to(self._param.unit)
        if isinstance(value, Quantity):
            value = value.value
        return func(self, value)
    return wrapper
def get_converter(param):
    """Pick the converter class matching *param*'s xtype or datatype."""
    converter_cls = registry.get(param.xtype)
    if converter_cls is not None:
        return converter_cls(param)
    if param.datatype in NUMERIC_DATATYPES:
        return Number(param)
    return Converter(param)
class Converter:
    """
    Fallback converter and base class for all converters.  Subclasses
    implement xtype-specific serialization; this base simply stringifies.
    """
    def __init__(self, param):
        # The PARAM this converter serializes values for.
        self._param = param

    def serialize(self, value):
        """
        Serialize for use in DAL Queries
        """
        return str(value)
class Number(Converter):
    """
    Converter for plain numeric params (short/int/long/float/double).

    Serialization delegates to astropy's VOTable converter so the output
    honours the param's declared datatype and arraysize.
    """
    def __init__(self, param):
        # The original code tested ``param.datatype not in {...}`` and then
        # did nothing (``pass``) -- a dead check.  It is dropped rather than
        # turned into an error: subclasses (Interval, Point, Circle,
        # Polygon) construct Number with params whose datatype is not
        # re-validated here, and raising would change their behavior.
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """
        Serialize for use in DAL Queries
        """
        return get_votable_converter(self._param).output(
            value, np.zeros_like(value))
@xtype('timestamp')
class Timestamp(Converter):
    """Converter for ISO timestamp params (datatype must be char)."""
    def __init__(self, param):
        if param.datatype != 'char':
            raise DALServiceError('Datatype is not char')
        super().__init__(param)

    def serialize(self, value):
        """
        Serialize time values for use in DAL Queries
        """
        parsed = Time(value)
        if parsed.size != 1:
            raise DALServiceError('Expecting a scalar time value')
        return parsed.isot
@xtype('interval')
class Interval(Number):
    """Converter for interval params: arraysize and values come in pairs."""
    def __init__(self, param):
        try:
            even = int(param.arraysize) % 2 == 0
        except ValueError:
            even = False
        if not even:
            raise DALServiceError('Arraysize is not even')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """Serialize an even-sized sequence of interval boundaries."""
        if np.size(value) % 2:
            raise DALServiceError('Interval size is not even')
        return super().serialize(value)
@xtype('point')
class Point(Number):
    """Converter for point params: exactly two coordinate values."""
    def __init__(self, param):
        try:
            is_pair = int(param.arraysize) == 2
        except ValueError:
            is_pair = False
        if not is_pair:
            raise DALServiceError('Point arraysize must be 2')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """Serialize a two-element coordinate pair."""
        if np.size(value) != 2:
            raise DALServiceError('Point size must be 2')
        return super().serialize(value)
@xtype('circle')
class Circle(Number):
    """Converter for circle params: exactly three values."""
    def __init__(self, param):
        # Guard the int() conversion like the sibling converters (Interval,
        # Point, Polygon) do, so a non-numeric arraysize such as '*' raises
        # DALServiceError instead of leaking a bare ValueError.
        try:
            arraysize = int(param.arraysize)
            if arraysize != 3:
                raise DALServiceError('Circle arraysize must be 3')
        except ValueError:
            raise DALServiceError('Circle arraysize must be 3')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """Serialize a three-element circle description."""
        size = np.size(value)
        if size != 3:
            raise DALServiceError('Circle size must be 3')
        return super().serialize(value)
@xtype('polygon')
class Polygon(Number):
def __init__(self, param):
try:
arraysize = int(param.arraysize)
if arraysize % 3:
raise DALServiceError('Arraysize is not a multiple of 3')
except ValueError:
if param.arraysize != '*':
raise DALServiceError('Arraysize is not a multiple of 3')
super().__init__(param)
@unify_value
def serialize(self, value):
size = np.size(value)
try:
if size % 3:
raise DALServiceError('Size is not a multiple of 3')
except ValueError:
raise DALServiceError('Size is not a multiple of 3')
return super().serialize(value) | pyvo/dal/params.py | import numpy as np
from astropy.units import Quantity, Unit
from astropy.time import Time
from astropy.io.votable.converters import (
get_converter as get_votable_converter)
from .exceptions import DALServiceError
NUMERIC_DATATYPES = {'short', 'int', 'long', 'float', 'double'}
def find_param_by_keyword(keyword, params):
    """
    Look up *keyword* in the *params* mapping.

    The keyword is tried verbatim first, then in its upper-cased form,
    before giving up with a KeyError (whose message carries the
    upper-cased keyword, matching the historical behaviour).
    """
    if keyword in params:
        return params[keyword]
    upper = keyword.upper()
    if upper in params:
        return params[upper]
    raise KeyError('No param named {} defined'.format(upper))
registry = dict()
def xtype(name):
    """Class decorator registering a converter class under xtype *name*."""
    def register(cls):
        registry[name] = cls
        return cls
    return register
def unify_value(func):
    """
    Decorator for serialize method to do unit conversion on input value.

    The decorator converts the input value to the unit in the input param.
    A dimensionless input is tagged with the param's unit as-is; an input
    that already carries a unit is converted to the param's unit.  The
    plain numeric value (Quantity unwrapped) is what reaches *func*.
    """
    def wrapper(self, value):
        if self._param.unit:
            value = Quantity(value)
            # An empty unit string means the input was dimensionless, so
            # attach the param's unit instead of converting.
            if not value.unit.to_string():
                value = value * Unit(self._param.unit)
            else:
                value = value.to(self._param.unit)
        if isinstance(value, Quantity):
            value = value.value
        return func(self, value)
    return wrapper
def get_converter(param):
    """Return the most specific converter instance for *param*."""
    # xtype-specific converters win over the plain numeric/base ones.
    converter_cls = registry.get(param.xtype)
    if converter_cls is not None:
        return converter_cls(param)
    if param.datatype in NUMERIC_DATATYPES:
        return Number(param)
    return Converter(param)
class Converter:
    """
    Base class for all converters.

    Each subclass handles the conversion of an input value based on a
    specific xtype; this base simply stringifies the value.
    """

    def __init__(self, param):
        # The VOTable PARAM this converter serializes values for.
        self._param = param

    def serialize(self, value):
        """
        Serialize for use in DAL Queries
        """
        return str(value)
class Number(Converter):
    """
    Converter for plain numeric params (short, int, long, float, double).
    """

    def __init__(self, param):
        # NOTE(review): the original tested ``param.datatype not in
        # {'short', 'int', 'long', 'float', 'double'}`` and then did
        # nothing (``pass``) — a dead no-op check, removed here.
        # Raising instead would change behavior for xtype subclasses
        # (Interval, Point, ...) constructed with non-numeric params,
        # so it is deliberately not turned into an error.
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """
        Serialize for use in DAL Queries
        """
        # Delegate to the VOTable converter for this param's datatype;
        # the zeros_like array marks no elements as masked/null.
        return get_votable_converter(self._param).output(
            value, np.zeros_like(value))
@xtype('timestamp')
class Timestamp(Converter):
    """Converter for ``timestamp`` xtype params (scalar time values)."""

    def __init__(self, param):
        # Timestamps are transported as character data in VOTables.
        if param.datatype != 'char':
            raise DALServiceError('Datatype is not char')
        super().__init__(param)

    def serialize(self, value):
        """
        Serialize time values for use in DAL Queries
        """
        value = Time(value)
        if value.size != 1:
            raise DALServiceError('Expecting a scalar time value')
        return value.isot
@xtype('interval')
class Interval(Number):
    """Converter for ``interval`` xtype params (pairs of numbers)."""

    def __init__(self, param):
        # An interval is one or more (lo, hi) pairs, so the declared
        # arraysize must be a plain even integer.
        try:
            is_even = int(param.arraysize) % 2 == 0
        except ValueError:
            is_even = False
        if not is_even:
            raise DALServiceError('Arraysize is not even')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        if np.size(value) % 2:
            raise DALServiceError('Interval size is not even')
        return super().serialize(value)
@xtype('point')
class Point(Number):
    """Converter for ``point`` xtype params (two-element positions)."""

    def __init__(self, param):
        # A point is exactly one coordinate pair; anything else
        # (including a non-numeric arraysize) is rejected uniformly.
        try:
            is_pair = int(param.arraysize) == 2
        except ValueError:
            is_pair = False
        if not is_pair:
            raise DALServiceError('Point arraysize must be 2')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        if np.size(value) != 2:
            raise DALServiceError('Point size must be 2')
        return super().serialize(value)
@xtype('circle')
class Circle(Number):
    """
    Converter for ``circle`` xtype params: a center position plus a radius,
    i.e. exactly three numbers.
    """

    def __init__(self, param):
        # Unlike Point/Interval/Polygon, the original called int() unguarded,
        # so a non-numeric arraysize (e.g. '*') leaked a raw ValueError
        # instead of the DALServiceError callers expect.
        try:
            arraysize = int(param.arraysize)
        except ValueError:
            raise DALServiceError('Circle arraysize must be 3')
        if arraysize != 3:
            raise DALServiceError('Circle arraysize must be 3')
        super().__init__(param)

    @unify_value
    def serialize(self, value):
        """
        Serialize a circle value; must contain exactly 3 numbers.
        """
        size = np.size(value)
        if size != 3:
            raise DALServiceError('Circle size must be 3')
        return super().serialize(value)
@xtype('polygon')
class Polygon(Number):
def __init__(self, param):
try:
arraysize = int(param.arraysize)
if arraysize % 3:
raise DALServiceError('Arraysize is not a multiple of 3')
except ValueError:
if param.arraysize != '*':
raise DALServiceError('Arraysize is not a multiple of 3')
super().__init__(param)
@unify_value
def serialize(self, value):
size = np.size(value)
try:
if size % 3:
raise DALServiceError('Size is not a multiple of 3')
except ValueError:
raise DALServiceError('Size is not a multiple of 3')
return super().serialize(value) | 0.8321 | 0.409103 |
from morphotactics.morphotactics import compile
from morphotactics.slot import Slot
from morphotactics.stem_guesser import StemGuesser
import pynini
import pywrapfst
from IPython.display import SVG, display
# helper function that transduces input_str belonging to lower alphabet to string in upper alphabet
def analyze(fst, input_str):
    """Compose *input_str* with *fst* and return the resulting upper-alphabet string."""
    composition = pynini.compose(input_str, fst)
    return composition.string()
# this is for Puebla Na:wat, not for Classical Nahuatl
nawat_alphabet = {
'C': ['ch', 'h', 'k', 'kw', 'l', 'm', 'n', 'p', 's', 't', 'ts', 'w', 'x', 'y'],
'V': ['a', 'e', 'i', 'o', 'a:', 'e:', 'i:', 'o:']
}
# A simple Na:wat noun parser that detects nouns with a given stem structure.
# stem is a string containing a simple regex used to construct a StemGuesser.
# This was used to debug how compile handles StemGuessers.
def parser_from_stem(stem):
    # Build a one-off parser: the regex in *stem* constrains the noun stem,
    # which must then be followed by exactly one absolutive suffix.
    return compile({
        StemGuesser(stem, 'NounStem', ['Absolutive'], alphabet=nawat_alphabet, start=True),
        Slot('Absolutive',
             [
                 ('-t', 't', [], 0.0),
                 ('-ti', 'ti', [], 0.0),
                 ('l-li', 'li', [], 0.0)  # This case actually has l in the stem
             ]),
    })
def _test_stem(stem):
    # Shared check: every stem pattern below must accept 'o:kichti' and
    # segment it as stem 'o:kich' plus absolutive suffix '-ti'.
    assert analyze(parser_from_stem(stem), 'o:kichti') == 'o:kich-ti'
# The tests below vary only the regex feature exercised by the StemGuesser.
def test_okich():
    # Literal stem, no regex operators.
    _test_stem('o:kich')
def test_dot_plus():
    _test_stem('o:ki.+')
def test_dot_star():
    _test_stem('o:ki.*')
def test_ch_question():
    _test_stem('o:ki(ch)?')
def test_ch_plus():
    _test_stem('o:ki(ch)+')
def test_ch_star():
    _test_stem('o:ki(ch)*')
def test_ch_ch_question():
    _test_stem('o:kich(ch)?')
def test_ch_ch_star():
    _test_stem('o:kich(ch)*')
# A simple Na:wat noun parser that accepts stems in any form. In reality, Na:wat noun
# stems must have more than one mora, but it's meaningless to add this restriction for
# our purposes.
sg_noun_parser = compile({
    # Any non-empty stem over the Na:wat alphabet may start the parse.
    StemGuesser('.+', 'NounStem', ['Absolutive'], alphabet=nawat_alphabet, start=True),
    Slot('Absolutive',
         [
             ('-t', 't', [], 0.0),
             ('-ti', 'ti', [], 0.0),
             ('l-li', 'li', [], 0.0)  # This case actually has l in the stem
         ]),
    # Possessive prefixes open an alternative start path through the parser.
    Slot('Possession',
         [
             ('no-', 'no', ['PossessedNounStem'], 0.0),
             ('mo-', 'mo', ['PossessedNounStem'], 0.0),
             ('i:-', 'i:', ['PossessedNounStem'], 0.0),
             ('to-', 'to', ['PossessedNounStem'], 0.0),
         ], start=True),
    # Possessed stems ('.*') continue to an (in)alienable possession suffix.
    StemGuesser('.*', 'PossessedNounStem', ['Inalienable', 'Alienable'], alphabet=nawat_alphabet),
    Slot('Inalienable', [('-yo', 'yo', [], 0.0)]),
    Slot('Alienable', [('-w', 'w', [], 0.0), ('-0', '', [], 0.0)])
})
# Testing the noun parser. More tests will be added in the near future.
def test_toy_nawat_sg_noun_parser():
assert analyze(sg_noun_parser, 'o:kichti') == 'o:kich-ti' | tests/test_toy_nawat_nouns.py | from morphotactics.morphotactics import compile
from morphotactics.slot import Slot
from morphotactics.stem_guesser import StemGuesser
import pynini
import pywrapfst
from IPython.display import SVG, display
# helper function that transduces input_str belonging to lower alphabet to string in upper alphabet
def analyze(fst, input_str):
return pynini.compose(input_str, fst).string()
# this is for Puebla Na:wat, not for Classical Nahuatl
nawat_alphabet = {
'C': ['ch', 'h', 'k', 'kw', 'l', 'm', 'n', 'p', 's', 't', 'ts', 'w', 'x', 'y'],
'V': ['a', 'e', 'i', 'o', 'a:', 'e:', 'i:', 'o:']
}
# A simple Na:wat noun parser that detects nouns with a given stem structure.
# stem is a string containing a simple regex used to construct a StemGuesser.
# This was used to debug how compile handles StemGuessers.
def parser_from_stem(stem):
return compile({
StemGuesser(stem, 'NounStem', ['Absolutive'], alphabet=nawat_alphabet, start=True),
Slot('Absolutive',
[
('-t', 't', [], 0.0),
('-ti', 'ti', [], 0.0),
('l-li', 'li', [], 0.0) # This case actually has l in the stem
]),
})
def _test_stem(stem):
assert analyze(parser_from_stem(stem), 'o:kichti') == 'o:kich-ti'
def test_okich():
_test_stem('o:kich')
def test_dot_plus():
_test_stem('o:ki.+')
def test_dot_star():
_test_stem('o:ki.*')
def test_ch_question():
_test_stem('o:ki(ch)?')
def test_ch_plus():
_test_stem('o:ki(ch)+')
def test_ch_star():
_test_stem('o:ki(ch)*')
def test_ch_ch_question():
_test_stem('o:kich(ch)?')
def test_ch_ch_star():
_test_stem('o:kich(ch)*')
# A simple Na:wat noun parser that accepts stems in any form. In reality, Na:wat noun
# stems must have more than one mora, but it's meaningless to add this restriction for
# our purposes.
sg_noun_parser = compile({
StemGuesser('.+', 'NounStem', ['Absolutive'], alphabet=nawat_alphabet, start=True),
Slot('Absolutive',
[
('-t', 't', [], 0.0),
('-ti', 'ti', [], 0.0),
('l-li', 'li', [], 0.0) # This case actually has l in the stem
]),
Slot('Possession',
[
('no-', 'no', ['PossessedNounStem'], 0.0),
('mo-', 'mo', ['PossessedNounStem'], 0.0),
('i:-', 'i:', ['PossessedNounStem'], 0.0),
('to-', 'to', ['PossessedNounStem'], 0.0),
], start=True),
StemGuesser('.*', 'PossessedNounStem', ['Inalienable', 'Alienable'], alphabet=nawat_alphabet),
Slot('Inalienable', [('-yo', 'yo', [], 0.0)]),
Slot('Alienable', [('-w', 'w', [], 0.0), ('-0', '', [], 0.0)])
})
# Testing the noun parser. More tests will be added in the near future.
def test_toy_nawat_sg_noun_parser():
assert analyze(sg_noun_parser, 'o:kichti') == 'o:kich-ti' | 0.571527 | 0.555254 |
from abc import ABC, abstractmethod
from typing import Sequence, Union, Tuple, Any
import re
import numpy as np
from mdde.agent.abc import NodeAgentMapping
from mdde.agent.enums import EActionResult
from mdde.config import ConfigEnvironment
from mdde.registry.protocol import PRegistryReadClient, PRegistryWriteClient
class ABCAgent(ABC):
"""
Base class for the agents definition.
Every agent must have a unique id assigned in the constructor. Additionally, every agent at runtime has access to
the read instructions and write API of the registry to facilitate actions execution.
"""
DEFAULT_GROUP: str = 'agent'
"""Default agent group name."""
@abstractmethod
def __init__(self,
agent_name: str,
agent_id: int,
data_node_ids: Union[Sequence[str], str],
group: str = DEFAULT_GROUP
):
"""
Constructor.
:param agent_name: Agent name (for logging and debugging).
:param data_node_ids: A set of the data node IDs associated with the agent.
:param agent_id: Unique integer id assigned to the agent (passed as an id to the learner).
:param group: Name of the group to which the agent belongs. Only letters and digits are allowed, special
characters, punctuation and spaces will be stripped.
"""
if agent_id is None:
raise TypeError("Agent ID must of type int")
if data_node_ids is None:
raise TypeError("Data node ID must of type String")
self._agent_name: str = agent_name if agent_name else ""
"""Name of the agent. Used for information and logging only"""
self._agent_id: int = agent_id
"""ID of the agent, must be unique within the current scenario run"""
if group is None:
raise TypeError("Agent group can't be None.")
self._group: str = re.sub('[^A-Za-z0-9_]+', '', group)
if not self._group:
raise ValueError("Agent group can't be empty")
self._filter_obs: bool = True
"""
If True, agent is expecting to receive full observation space for processing it
using :py:func:`filter_observation`.
If False,
"""
self._config: ConfigEnvironment = None
"""Environment configuration"""
# At least one data node must be specified
if len(data_node_ids) < 1:
raise ValueError("The agent must be associated with at lest one data node")
if not isinstance(data_node_ids, (str, bytes, bytearray)):
# Duplicates are not allowed in the data nodes list
data_node_ids_set = set(data_node_ids)
if len(data_node_ids_set) != len(data_node_ids):
raise ValueError("The agent data node ids list contains duplicates")
self._data_node_id: Tuple[str, ...] = tuple(data_node_ids)
else:
self._data_node_id: Tuple[str, ...] = (data_node_ids,)
# Read and write access to the registry.
# These properties will have the implementation of the protocols assigned to them at the time of execution,
# Use these to create actions affecting the registry (write) and the agent observation space (read).
self._registry_read: PRegistryReadClient = None
"""Read access to the registry. Guaranteed to be filled by the environment before any calls."""
self._registry_write: PRegistryWriteClient = None
"""Write access to the registry. Guaranteed to be filled by the environment before any calls."""
self._experiment_id: str = None
"""Experiment ID. Guaranteed to be filled by the environment before any calls."""
self.done: bool = False
"""'Done flag. Set to True if the agent should no longer do anything within the current episode."""
    @property
    def id(self) -> int:
        """
        Get the ID of the agent, unique within the running scenario.
        :return: Numerical agent ID.
        """
        return self._agent_id
    @property
    def name(self) -> str:
        """
        Get the name of the agent, might be used in logging for simplifying identification.
        :return: String agent name.
        """
        return self._agent_name
    @property
    def experiment_id(self) -> str:
        """
        Experiment ID to which this agent is attached.
        :return: Short alphanumeric string.
        """
        return self._experiment_id
    @property
    def group(self) -> str:
        """
        Group title to which the agent belongs.
        :return: String group name. All spaces and special characters are stripped to ensure better compatibility with
        RL frameworks that would use this property.
        """
        return self._group
def attach_registry(self, registry_read: PRegistryReadClient, registry_write: PRegistryWriteClient) -> None:
"""
Method is used by the environment to provide agent access to the registry.
Should not be called by any user defined code.
:param registry_write: Write access to the registry
:param registry_read: Read-only access to the registry.
"""
self._registry_read = registry_read
self._registry_write = registry_write
def attach_to_experiment(self, experiment_id: str) -> None:
"""
Method is used by the environment to provide agent with the relevant experiment ID.
Should not be called by any user defined code.
:param experiment_id: Short alphanumeric experiment ID.
"""
self._experiment_id = experiment_id
def inject_env_config(self, config: ConfigEnvironment) -> None:
"""
Environment configuration injected to the agent.
:param config: ConfigEnvironment
"""
self._config = config
def reset(self) -> None:
"""
Method is called by the Scenario when the Environment being reset.
By default, sets the agents done flag to False. Override this method if additional cleanup is required.
"""
self.done = False
@property
def obs_filter(self) -> bool:
"""
If True, scenario should invoke :py:func:`.ABCAgent.filter_observation` to get the agent observations.
If it's False, :py:func:`.ABCAgent.form_observation` should be used.
If there are multiple agents having full observation access to the environment, it makes sense to retrieve it
once in the scenario and then mutate by the agents when needed.
"""
return self._filter_obs
@property
def data_node_ids(self) -> Tuple[str, ...]:
"""
Get the data node IDs (string) associated with this agent
:return: Tuple of the data node ID strings managed by the agent
"""
return self._data_node_id
@property
def mapped_data_node_ids(self) -> Tuple[NodeAgentMapping, ...]:
"""Data nodes managed by the agent as NodeAgentMapping tuples
:return: Ordered tuple of NodeAgentMapping tuples
"""
return tuple(NodeAgentMapping(self.id, node) for node in self.data_node_ids)
@abstractmethod
def get_actions(self) -> int:
"""
Get the number of actions from 0 to n, each discrete number within the range correspond to a specific action.
:return: Number of available actions N_a. Each action is mapped to an index within range [0, N_a)
"""
raise NotImplementedError
def get_actions_described(self) -> Any:
"""Retrieve meaningful read-only described actions sorted or otherwise conforming to their indexes used by
the RL algorithms. Generally not useful for the 'proper' DRL. We also don't enforce any specific return
type as the composition of the actions and the describing object might differ drastically from one agent
to another. Use with care and only in the specific instances where you're sure it's needed (meta-data, stats,
etc.). It's also optional for implementation."""
raise NotImplementedError
@abstractmethod
def create_action_space(self,
nodes: Tuple[NodeAgentMapping, ...],
fragments: Sequence[str],
obs: np.ndarray
) -> int:
"""
Override this method to create action space associated with this agent.
When this method is invoked, attach_registry() method was already called so the agent already should have access
to the registry. Additionally, this method is parameterized with the full observation space that was generated
right after the initialization of the environment.
Actions provided by the agent can rely on the supplied parameters or be hard coded. Implement this method in
accordance to the simulated scenario.
:param nodes: Description of the observation space nodes
:param fragments: Ordered sequence of fragments
:param obs: Observation space
:return: Number of available actions N_a. Each action is mapped to an index within range [0, N_a)
"""
raise NotImplementedError
@abstractmethod
def do_action(self, action_id: int) -> EActionResult:
"""
Execute an action corresponding to the specified action id [0,self.get_actions())
:param action_id: Action id as defined in the action space
:return: EActionResult for an action that was processed correctly. If a general error (error without an MDDE
error code is returned, then a general exception must be raised instead)
"""
raise NotImplementedError
    @abstractmethod
    def filter_observation(self,
                           obs_descr: Tuple[NodeAgentMapping, ...],
                           fragments: Tuple[str, ...],
                           obs: np.ndarray) -> np.ndarray:
        """
        Get observation space for the specific agent.
        :param obs_descr: Observation space description.
        :param fragments: Ordered list of fragments.
        :param obs: Full observation space provided by the environment.
        :return: Agents can have full or limited observation spaces. In case of the latter, provide the filtering logic
        within this function and return a filtered out observation space.
        """
        raise NotImplementedError
@abstractmethod
def form_observation(self, **kwargs) -> np.ndarray:
"""
Override if the agent should form it's own observations, independent of the full space observations returned
by the environment.
Use the read access to the registry in order to form the observation space.
:param kwargs: (optional) Scenario specific parameters.
:return: Observation space numpy array.
"""
raise NotImplementedError | mdde/core/mdde/agent/abc/abc_agent.py | from abc import ABC, abstractmethod
from typing import Sequence, Union, Tuple, Any
import re
import numpy as np
from mdde.agent.abc import NodeAgentMapping
from mdde.agent.enums import EActionResult
from mdde.config import ConfigEnvironment
from mdde.registry.protocol import PRegistryReadClient, PRegistryWriteClient
class ABCAgent(ABC):
"""
Base class for the agents definition.
Every agent must have a unique id assigned in the constructor. Additionally, every agent at runtime has access to
the read instructions and write API of the registry to facilitate actions execution.
"""
DEFAULT_GROUP: str = 'agent'
"""Default agent group name."""
@abstractmethod
def __init__(self,
agent_name: str,
agent_id: int,
data_node_ids: Union[Sequence[str], str],
group: str = DEFAULT_GROUP
):
"""
Constructor.
:param agent_name: Agent name (for logging and debugging).
:param data_node_ids: A set of the data node IDs associated with the agent.
:param agent_id: Unique integer id assigned to the agent (passed as an id to the learner).
:param group: Name of the group to which the agent belongs. Only letters and digits are allowed, special
characters, punctuation and spaces will be stripped.
"""
if agent_id is None:
raise TypeError("Agent ID must of type int")
if data_node_ids is None:
raise TypeError("Data node ID must of type String")
self._agent_name: str = agent_name if agent_name else ""
"""Name of the agent. Used for information and logging only"""
self._agent_id: int = agent_id
"""ID of the agent, must be unique within the current scenario run"""
if group is None:
raise TypeError("Agent group can't be None.")
self._group: str = re.sub('[^A-Za-z0-9_]+', '', group)
if not self._group:
raise ValueError("Agent group can't be empty")
self._filter_obs: bool = True
"""
If True, agent is expecting to receive full observation space for processing it
using :py:func:`filter_observation`.
If False,
"""
self._config: ConfigEnvironment = None
"""Environment configuration"""
# At least one data node must be specified
if len(data_node_ids) < 1:
raise ValueError("The agent must be associated with at lest one data node")
if not isinstance(data_node_ids, (str, bytes, bytearray)):
# Duplicates are not allowed in the data nodes list
data_node_ids_set = set(data_node_ids)
if len(data_node_ids_set) != len(data_node_ids):
raise ValueError("The agent data node ids list contains duplicates")
self._data_node_id: Tuple[str, ...] = tuple(data_node_ids)
else:
self._data_node_id: Tuple[str, ...] = (data_node_ids,)
# Read and write access to the registry.
# These properties will have the implementation of the protocols assigned to them at the time of execution,
# Use these to create actions affecting the registry (write) and the agent observation space (read).
self._registry_read: PRegistryReadClient = None
"""Read access to the registry. Guaranteed to be filled by the environment before any calls."""
self._registry_write: PRegistryWriteClient = None
"""Write access to the registry. Guaranteed to be filled by the environment before any calls."""
self._experiment_id: str = None
"""Experiment ID. Guaranteed to be filled by the environment before any calls."""
self.done: bool = False
"""'Done flag. Set to True if the agent should no longer do anything within the current episode."""
@property
def id(self) -> int:
"""
Get the ID of the agent, unique within the running scenario.
:return: Numerical agent ID.
"""
return self._agent_id
@property
def name(self) -> str:
"""
Get the name of the agent, might be used in logging for simplifying identification.
:return: String agent name.
"""
return self._agent_name
@property
def experiment_id(self) -> str:
"""
Experiment ID to which this agent is attached to.
:return: Short alphanumeric string.
"""
return self._experiment_id
@property
def group(self) -> str:
"""
Group tittle to which agent belongs.
:return: String group name. All spaces and special characters are stripped to ensure better compatibility with
RL frameworks that would use this property.
"""
return self._group
def attach_registry(self, registry_read: PRegistryReadClient, registry_write: PRegistryWriteClient) -> None:
"""
Method is used by the environment to provide agent access to the registry.
Should not be called by any user defined code.
:param registry_write: Write access to the registry
:param registry_read: Read-only access to the registry.
"""
self._registry_read = registry_read
self._registry_write = registry_write
def attach_to_experiment(self, experiment_id: str) -> None:
"""
Method is used by the environment to provide agent with the relevant experiment ID.
Should not be called by any user defined code.
:param experiment_id: Short alphanumeric experiment ID.
"""
self._experiment_id = experiment_id
def inject_env_config(self, config: ConfigEnvironment) -> None:
"""
Environment configuration injected to the agent.
:param config: ConfigEnvironment
"""
self._config = config
def reset(self) -> None:
"""
Method is called by the Scenario when the Environment being reset.
By default, sets the agents done flag to False. Override this method if additional cleanup is required.
"""
self.done = False
@property
def obs_filter(self) -> bool:
"""
If True, scenario should invoke :py:func:`.ABCAgent.filter_observation` to get the agent observations.
If it's False, :py:func:`.ABCAgent.form_observation` should be used.
If there are multiple agents having full observation access to the environment, it makes sense to retrieve it
once in the scenario and then mutate by the agents when needed.
"""
return self._filter_obs
@property
def data_node_ids(self) -> Tuple[str, ...]:
"""
Get the data node IDs (string) associated with this agent
:return: Tuple of the data node ID strings managed by the agent
"""
return self._data_node_id
@property
def mapped_data_node_ids(self) -> Tuple[NodeAgentMapping, ...]:
"""Data nodes managed by the agent as NodeAgentMapping tuples
:return: Ordered tuple of NodeAgentMapping tuples
"""
return tuple(NodeAgentMapping(self.id, node) for node in self.data_node_ids)
@abstractmethod
def get_actions(self) -> int:
"""
Get the number of actions from 0 to n, each discrete number within the range correspond to a specific action.
:return: Number of available actions N_a. Each action is mapped to an index within range [0, N_a)
"""
raise NotImplementedError
def get_actions_described(self) -> Any:
"""Retrieve meaningful read-only described actions sorted or otherwise conforming to their indexes used by
the RL algorithms. Generally not useful for the 'proper' DRL. We also don't enforce any specific return
type as the composition of the actions and the describing object might differ drastically from one agent
to another. Use with care and only in the specific instances where you're sure it's needed (meta-data, stats,
etc.). It's also optional for implementation."""
raise NotImplementedError
@abstractmethod
def create_action_space(self,
nodes: Tuple[NodeAgentMapping, ...],
fragments: Sequence[str],
obs: np.ndarray
) -> int:
"""
Override this method to create action space associated with this agent.
When this method is invoked, attach_registry() method was already called so the agent already should have access
to the registry. Additionally, this method is parameterized with the full observation space that was generated
right after the initialization of the environment.
Actions provided by the agent can rely on the supplied parameters or be hard coded. Implement this method in
accordance to the simulated scenario.
:param nodes: Description of the observation space nodes
:param fragments: Ordered sequence of fragments
:param obs: Observation space
:return: Number of available actions N_a. Each action is mapped to an index within range [0, N_a)
"""
raise NotImplementedError
@abstractmethod
def do_action(self, action_id: int) -> EActionResult:
"""
Execute an action corresponding to the specified action id [0,self.get_actions())
:param action_id: Action id as defined in the action space
:return: EActionResult for an action that was processed correctly. If a general error (error without an MDDE
error code is returned, then a general exception must be raised instead)
"""
raise NotImplementedError
@abstractmethod
def filter_observation(self,
obs_descr: Tuple[NodeAgentMapping, ...],
fragments: Tuple[str, ...],
obs: np.array) -> np.ndarray:
"""
Get observation space for the specific agent.
:param obs_descr: Observation space description.
:param fragments: Ordered list of fragments.
:param obs: full_observation: Full observation space provided by the environment.
:return: Agents can have full or limited observation spaces. In case of the latter, provide the filtering logic
within this function and return a filtered out observation space.
"""
raise NotImplementedError
@abstractmethod
def form_observation(self, **kwargs) -> np.ndarray:
"""
Override if the agent should form it's own observations, independent of the full space observations returned
by the environment.
Use the read access to the registry in order to form the observation space.
:param kwargs: (optional) Scenario specific parameters.
:return: Observation space numpy array.
"""
raise NotImplementedError | 0.941808 | 0.350088 |
class CategoryAttribute(Attribute, _Attribute):
"""
Specifies the name of the category in which to group the property or event when displayed in a System.Windows.Forms.PropertyGrid control set to Categorized mode.
CategoryAttribute()
CategoryAttribute(category: str)
"""
def Equals(self, obj):
"""
Equals(self: CategoryAttribute,obj: object) -> bool
Returns whether the value of the given object is equal to the current
System.ComponentModel.CategoryAttribute..
obj: The object to test the value equality of.
Returns: true if the value of the given object is equal to that of the current; otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: CategoryAttribute) -> int
Returns the hash code for this attribute.
Returns: A 32-bit signed integer hash code.
"""
pass
    def GetLocalizedString(self, *args):
        """
        GetLocalizedString(self: CategoryAttribute,value: str) -> str
        Looks up the localized name of the specified category.
        value: The identifier for the category to look up.
        Returns: The localized name of the category,or null if a localized name does not exist.
        """
        pass
def IsDefaultAttribute(self):
"""
IsDefaultAttribute(self: CategoryAttribute) -> bool
Determines if this attribute is the default.
Returns: true if the attribute is the default value for this attribute class; otherwise,false.
"""
pass
def __eq__(self, *args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, category=None):
"""
__new__(cls: type)
__new__(cls: type,category: str)
"""
pass
def __ne__(self, *args):
pass
Category = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the name of the category for the property or event that this attribute is applied to.
Get: Category(self: CategoryAttribute) -> str
"""
Action = None
Appearance = None
Asynchronous = None
Behavior = None
Data = None
Default = None
Design = None
DragDrop = None
Focus = None
Format = None
Key = None
Layout = None
Mouse = None
WindowStyle = None | release/stubs.min/System/ComponentModel/__init___parts/CategoryAttribute.py | class CategoryAttribute(Attribute, _Attribute):
"""
Specifies the name of the category in which to group the property or event when displayed in a System.Windows.Forms.PropertyGrid control set to Categorized mode.
CategoryAttribute()
CategoryAttribute(category: str)
"""
def Equals(self, obj):
"""
Equals(self: CategoryAttribute,obj: object) -> bool
Returns whether the value of the given object is equal to the current
System.ComponentModel.CategoryAttribute..
obj: The object to test the value equality of.
Returns: true if the value of the given object is equal to that of the current; otherwise,false.
"""
pass
def GetHashCode(self):
"""
GetHashCode(self: CategoryAttribute) -> int
Returns the hash code for this attribute.
Returns: A 32-bit signed integer hash code.
"""
pass
def GetLocalizedString(self, *args):
"""
GetLocalizedString(self: CategoryAttribute,value: str) -> str
Looks up the localized name of the specified category.
value: The identifer for the category to look up.
Returns: The localized name of the category,or null if a localized name does not exist.
"""
pass
def IsDefaultAttribute(self):
"""
IsDefaultAttribute(self: CategoryAttribute) -> bool
Determines if this attribute is the default.
Returns: true if the attribute is the default value for this attribute class; otherwise,false.
"""
pass
def __eq__(self, *args):
""" x.__eq__(y) <==> x==y """
pass
def __init__(self, *args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
@staticmethod
def __new__(self, category=None):
"""
__new__(cls: type)
__new__(cls: type,category: str)
"""
pass
def __ne__(self, *args):
pass
Category = property(lambda self: object(), lambda self, v: None, lambda self: None)
"""Gets the name of the category for the property or event that this attribute is applied to.
Get: Category(self: CategoryAttribute) -> str
"""
Action = None
Appearance = None
Asynchronous = None
Behavior = None
Data = None
Default = None
Design = None
DragDrop = None
Focus = None
Format = None
Key = None
Layout = None
Mouse = None
WindowStyle = None | 0.823719 | 0.408926 |
import json
import math
import unittest
from pytopojson import (
feature,
mesh,
stitch,
)
class MeshTestCase(unittest.TestCase):
    def setUp(self):
        # Fresh operator instances for every test; each pytopojson class is
        # a callable implementing the corresponding topojson operation.
        self.feature = feature.Feature()
        self.mesh = mesh.Mesh()
        self.stitch = stitch.Stitch()
    def test_mesh_ignores_null_geometries(self):
        # Geometries whose type is None must contribute no coordinates.
        topology = {"type": "Topology", "objects": {}, "arcs": []}
        self.assertDictEqual(
            self.mesh(topology, [{"type": None}]),
            {"type": "MultiLineString", "coordinates": []},
        )
    def test_mesh_stitches_together_two_connected_line_strings(self):
        # Arc 1 ends where arc 0 begins ([1, 0]), so the two LineStrings
        # must be merged into one continuous line in the mesh.
        topology = {
            "type": "Topology",
            "objects": {
                "collection": {
                    "type": "GeometryCollection",
                    "geometries": [
                        {"type": "LineString", "arcs": [0]},
                        {"type": "LineString", "arcs": [1]},
                    ],
                }
            },
            "arcs": [[[1, 0], [2, 0]], [[0, 0], [1, 0]]],
        }
        self.assertDictEqual(
            {"type": "MultiLineString", "coordinates": [[[0, 0], [1, 0], [2, 0]]]},
            self.mesh(topology, topology["objects"]["collection"]),
        )
def test_mesh_does_not_stitch_together_two_disconnected_line_strings(self):
    # Arcs [[2,0],[3,0]] and [[0,0],[1,0]] share no endpoint, so the mesh
    # must keep them as two separate line strings.
    topology = {
        "type": "Topology",
        "objects": {
            "collection": {
                "type": "GeometryCollection",
                "geometries": [
                    {"type": "LineString", "arcs": [0]},
                    {"type": "LineString", "arcs": [1]},
                ],
            }
        },
        "arcs": [[[2, 0], [3, 0]], [[0, 0], [1, 0]]],
    }
    self.assertDictEqual(
        {
            "type": "MultiLineString",
            "coordinates": [[[2, 0], [3, 0]], [[0, 0], [1, 0]]],
        },
        self.mesh(topology, topology["objects"]["collection"]),
) | tests/test_mesh.py | import json
import math
import unittest
from pytopojson import (
feature,
mesh,
stitch,
)
class MeshTestCase(unittest.TestCase):
def setUp(self):
self.feature = feature.Feature()
self.mesh = mesh.Mesh()
self.stitch = stitch.Stitch()
def test_mesh_ignores_null_geometries(self):
topology = {"type": "Topology", "objects": {}, "arcs": []}
self.assertDictEqual(
self.mesh(topology, [{"type": None}]),
{"type": "MultiLineString", "coordinates": []},
)
def test_mesh_stitches_together_two_connected_line_strings(self):
topology = {
"type": "Topology",
"objects": {
"collection": {
"type": "GeometryCollection",
"geometries": [
{"type": "LineString", "arcs": [0]},
{"type": "LineString", "arcs": [1]},
],
}
},
"arcs": [[[1, 0], [2, 0]], [[0, 0], [1, 0]]],
}
self.assertDictEqual(
{"type": "MultiLineString", "coordinates": [[[0, 0], [1, 0], [2, 0]]]},
self.mesh(topology, topology["objects"]["collection"]),
)
def test_mesh_does_not_stitch_together_two_disconnected_line_strings(self):
topology = {
"type": "Topology",
"objects": {
"collection": {
"type": "GeometryCollection",
"geometries": [
{"type": "LineString", "arcs": [0]},
{"type": "LineString", "arcs": [1]},
],
}
},
"arcs": [[[2, 0], [3, 0]], [[0, 0], [1, 0]]],
}
self.assertDictEqual(
{
"type": "MultiLineString",
"coordinates": [[[2, 0], [3, 0]], [[0, 0], [1, 0]]],
},
self.mesh(topology, topology["objects"]["collection"]),
) | 0.552298 | 0.456773 |
from pathlib import Path
from time import time
import tempfile
import pytest
from target_extraction.data_types import TargetTextCollection
from target_extraction.dataset_parsers import CACHE_DIRECTORY
from target_extraction.dataset_parsers import download_election_folder
from target_extraction.dataset_parsers import wang_2017_election_twitter_test, wang_2017_election_twitter_train
def test_download_election_folder():
    """Tests `download_election_folder`: fresh download, default cache
    directory, cached re-use (second call must be near-instant), and the
    FileNotFoundError raised when the target folder exists but is only
    partially populated.
    """
    def test_files_and_folders_downloaded(dir_path: Path):
        # A download counts as complete when both sub-folders and both id
        # files are present.
        annotation_folder = Path(dir_path, 'annotations')
        assert annotation_folder.is_dir()
        tweets_folder = Path(dir_path, 'tweets')
        assert tweets_folder.is_dir()
        train_id_fp = Path(dir_path, 'train_id.txt')
        assert train_id_fp.exists()
        test_id_fp = Path(dir_path, 'test_id.txt')
        assert test_id_fp.exists()
    # Test the normal case where it should successfully download the data
    with tempfile.TemporaryDirectory() as temp_dir:
        temp_dir_path = Path(temp_dir, 'data dir')
        download_election_folder(temp_dir_path)
        test_files_and_folders_downloaded(Path(temp_dir_path, 'Wang 2017 Election Twitter'))
    # Test the case where you do not need to specify a directory it just uses
    # the standard cache_dir = CACHE_DIRECTORY
    download_election_folder()
    test_files_and_folders_downloaded(Path(CACHE_DIRECTORY, 'Wang 2017 Election Twitter'))
    # Test the case where it has already been downloaded
    # Should take longer to download than to check
    with tempfile.TemporaryDirectory() as temp_dir:
        first_download_time = time()
        temp_dir_path_1 = Path(temp_dir, 'first')
        download_election_folder(temp_dir_path_1)
        first_download_time = time() - first_download_time
        test_files_and_folders_downloaded(Path(temp_dir_path_1, 'Wang 2017 Election Twitter'))
        second_time = time()
        download_election_folder(temp_dir_path_1)
        second_time = time() - second_time
        test_files_and_folders_downloaded(Path(temp_dir_path_1, 'Wang 2017 Election Twitter'))
        assert second_time < first_download_time
        # NOTE(review): absolute wall-clock thresholds are brittle on loaded
        # CI machines; consider keeping only the relative comparison above.
        assert second_time < 0.005
        assert first_download_time > 0.1
    # Test the case where only a certain number of the files have been downloaded.
    with tempfile.TemporaryDirectory() as temp_dir:
        with pytest.raises(FileNotFoundError):
            temp_internal_dir = Path(temp_dir, 'Wang 2017 Election Twitter')
            temp_internal_dir.mkdir(parents=True, exist_ok=True)
            # Bug fix: the original `temp_internal_dir.touch('test_id.txt')`
            # passed the file name as Path.touch's `mode` argument (a
            # TypeError) instead of creating the file inside the directory.
            Path(temp_internal_dir, 'test_id.txt').touch()
            download_election_folder(Path(temp_dir))
def test_train_and_test_dataset():
    """Loads the Wang 2017 train/test splits via both the default cache and
    an explicit cache directory, and checks train is larger than test.
    """
    with tempfile.TemporaryDirectory() as temp_dir:
        # Test both the normal cache_dir and the given cache dir
        for data_dir in [None, Path(temp_dir, 'twitter data')]:
            train_data = wang_2017_election_twitter_train(data_dir)
            test_data = wang_2017_election_twitter_test(data_dir)
            assert len(train_data) > len(test_data)
            combined_data = TargetTextCollection.combine(train_data, test_data)
assert 11899 == combined_data.number_targets() | tests/dataset_parsers/wang_2017_election_test.py | from pathlib import Path
from time import time
import tempfile
import pytest
from target_extraction.data_types import TargetTextCollection
from target_extraction.dataset_parsers import CACHE_DIRECTORY
from target_extraction.dataset_parsers import download_election_folder
from target_extraction.dataset_parsers import wang_2017_election_twitter_test, wang_2017_election_twitter_train
def test_download_election_folder():
def test_files_and_folders_downloaded(dir_path: Path):
annotation_folder = Path(dir_path, 'annotations')
assert annotation_folder.is_dir()
tweets_folder = Path(dir_path, 'tweets')
assert tweets_folder.is_dir()
train_id_fp = Path(dir_path, 'train_id.txt')
assert train_id_fp.exists()
test_id_fp = Path(dir_path, 'test_id.txt')
assert test_id_fp.exists()
# Test the normal case where it should successfully download the data
with tempfile.TemporaryDirectory() as temp_dir:
temp_dir_path = Path(temp_dir, 'data dir')
download_election_folder(temp_dir_path)
test_files_and_folders_downloaded(Path(temp_dir_path, 'Wang 2017 Election Twitter'))
# Test the case where you do not need to specify a directory it just uses
# the standard cache_dir = CACHE_DIRECTORY
download_election_folder()
test_files_and_folders_downloaded(Path(CACHE_DIRECTORY, 'Wang 2017 Election Twitter'))
# Test the case where it has already been downloaded
# Should take longer to download than to check
with tempfile.TemporaryDirectory() as temp_dir:
first_download_time = time()
temp_dir_path_1 = Path(temp_dir, 'first')
download_election_folder(temp_dir_path_1)
first_download_time = time() - first_download_time
test_files_and_folders_downloaded(Path(temp_dir_path_1, 'Wang 2017 Election Twitter'))
second_time = time()
download_election_folder(temp_dir_path_1)
second_time = time() - second_time
test_files_and_folders_downloaded(Path(temp_dir_path_1, 'Wang 2017 Election Twitter'))
assert second_time < first_download_time
assert second_time < 0.005
assert first_download_time > 0.1
# Test the case where only a certain number of the files have been downloaded.
with tempfile.TemporaryDirectory() as temp_dir:
with pytest.raises(FileNotFoundError):
temp_internal_dir = Path(temp_dir, 'Wang 2017 Election Twitter')
temp_internal_dir.mkdir(parents=True, exist_ok=True)
temp_internal_dir.touch('test_id.txt')
download_election_folder(Path(temp_dir))
def test_train_and_test_dataset():
with tempfile.TemporaryDirectory() as temp_dir:
# Test both the normal cahce_dir and the given cache dir
for data_dir in [None, Path(temp_dir, 'twitter data')]:
train_data = wang_2017_election_twitter_train(data_dir)
test_data = wang_2017_election_twitter_test(data_dir)
assert len(train_data) > len(test_data)
combined_data = TargetTextCollection.combine(train_data, test_data)
assert 11899 == combined_data.number_targets() | 0.505859 | 0.442516 |
import logging
from common_utils_py.did import did_to_id
from contracts_lib_py.keeper import Keeper
from contracts_lib_py.utils import process_fulfill_condition
from eth_utils import add_0x_prefix
logger = logging.getLogger(__name__)
def fulfill_access_secret_store_condition(event, agreement_id, did, service_agreement,
                                          consumer_address, publisher_account, access_condition_id):
    """
    Fulfill the access condition.

    :param event: AttributeDict with the event data.
    :param agreement_id: id of the agreement, hex str
    :param did: DID, str
    :param service_agreement: ServiceAgreement instance
    :param consumer_address: ethereum account address of consumer, hex str
    :param publisher_account: Account instance of the publisher
    :param access_condition_id: hex str the id of the access secretstore condition for this
        `agreement_id`
    :raises AssertionError: if the document id from the service agreement does
        not match the asset id derived from `did`
    """
    # An empty event means the upstream listener timed out: nothing to do.
    if not event:
        logger.debug(f'`fulfill_access_secret_store_condition` got empty event: '
                     f'event listener timed out.')
        return
    keeper = Keeper.get_instance()
    # States above 1 are already fulfilled/aborted — skip duplicate work.
    if keeper.condition_manager.get_condition_state(access_condition_id) > 1:
        logger.debug(
            f'access secretstore condition already fulfilled/aborted: '
            f'agreementId={agreement_id}, access secretstore conditionId={access_condition_id}'
        )
        return
    logger.debug(f"grant access (agreement {agreement_id}) after event {event}.")
    name_to_parameter = {param.name: param for param in
                         service_agreement.condition_by_name['accessSecretStore'].parameters}
    document_id = add_0x_prefix(name_to_parameter['_documentId'].value)
    asset_id = add_0x_prefix(did_to_id(did))
    # Explicit raise instead of a bare `assert` so this consistency check is
    # not stripped when Python runs with -O; AssertionError is preserved for
    # any existing callers that catch it.
    if document_id != asset_id:
        raise AssertionError(f'document_id {document_id} <=> asset_id {asset_id} mismatch.')
    args = (
        agreement_id,
        document_id,
        consumer_address,
        publisher_account
    )
    # 10 is presumably the retry/attempt bound of process_fulfill_condition
    # — TODO confirm against contracts_lib_py.utils.
    process_fulfill_condition(args, keeper.access_secret_store_condition, access_condition_id,
                              logger, keeper, 10)
fulfillAccessSecretStoreCondition = fulfill_access_secret_store_condition | nevermined_gateway_events/event_handlers/lockRewardCondition.py | import logging
from common_utils_py.did import did_to_id
from contracts_lib_py.keeper import Keeper
from contracts_lib_py.utils import process_fulfill_condition
from eth_utils import add_0x_prefix
logger = logging.getLogger(__name__)
def fulfill_access_secret_store_condition(event, agreement_id, did, service_agreement,
consumer_address, publisher_account, access_condition_id):
"""
Fulfill the access condition.
:param event: AttributeDict with the event data.
:param agreement_id: id of the agreement, hex str
:param did: DID, str
:param service_agreement: ServiceAgreement instance
:param consumer_address: ethereum account address of consumer, hex str
:param publisher_account: Account instance of the publisher
:param access_condition_id: hex str the id of the access secretstore condition for this
`agreement_id`
"""
if not event:
logger.debug(f'`fulfill_access_secret_store_condition` got empty event: '
f'event listener timed out.')
return
keeper = Keeper.get_instance()
if keeper.condition_manager.get_condition_state(access_condition_id) > 1:
logger.debug(
f'access secretstore condition already fulfilled/aborted: '
f'agreementId={agreement_id}, access secretstore conditionId={access_condition_id}'
)
return
logger.debug(f"grant access (agreement {agreement_id}) after event {event}.")
name_to_parameter = {param.name: param for param in
service_agreement.condition_by_name['accessSecretStore'].parameters}
document_id = add_0x_prefix(name_to_parameter['_documentId'].value)
asset_id = add_0x_prefix(did_to_id(did))
assert document_id == asset_id, f'document_id {document_id} <=> asset_id {asset_id} mismatch.'
args = (
agreement_id,
document_id,
consumer_address,
publisher_account
)
process_fulfill_condition(args, keeper.access_secret_store_condition, access_condition_id,
logger, keeper, 10)
fulfillAccessSecretStoreCondition = fulfill_access_secret_store_condition | 0.646125 | 0.115536 |
"""Tests for load_bigquery_stats."""
import datetime
import unittest
import flask
import mock
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.cron import load_bigquery_stats
@test_utils.with_cloud_emulators('datastore')
class LoadBigQueryStatsTest(unittest.TestCase):
"""Test LoadBigQueryStatsTest."""
def setUp(self):
    # Mount the cron handler on a throw-away Flask app so it can be driven
    # through webtest without the real App Engine runtime.
    flaskapp = flask.Flask('testflask')
    flaskapp.add_url_rule(
        '/load-bigquery-stats',
        view_func=load_bigquery_stats.Handler.as_view('/load-bigquery-stats'))
    self.app = webtest.TestApp(flaskapp)
    # Seed the datastore emulator with one fuzzer/job pair so the handler
    # has something to load stats for.
    data_types.Fuzzer(name='fuzzer', jobs=['job']).put()
    data_types.Job(name='job').put()
    test_helpers.patch(self, [
        'clusterfuzz._internal.google_cloud_utils.big_query.get_api_client',
        'handlers.base_handler.Handler.is_cron',
        'handlers.cron.load_bigquery_stats.Handler._utc_now',
    ])
    # Freeze "now" at 2016-09-08 so the handler targets the 20160907 partition.
    self.mock._utc_now.return_value = datetime.datetime(2016, 9, 8)  # pylint: disable=protected-access
    self.mock_bigquery = mock.MagicMock()
    self.mock.get_api_client.return_value = self.mock_bigquery
def test_execute(self):
"""Tests executing of cron job."""
self.app.get('/load-bigquery-stats')
self.mock_bigquery.datasets().insert.assert_has_calls([
mock.call(
projectId='test-clusterfuzz',
body={
'datasetReference': {
'projectId': 'test-clusterfuzz',
'datasetId': 'fuzzer_stats'
}
}),
mock.call().execute()
])
self.mock_bigquery.tables().insert.assert_has_calls([
mock.call(
body={
'timePartitioning': {
'type': 'DAY'
},
'tableReference': {
'projectId': 'test-clusterfuzz',
'tableId': 'JobRun',
'datasetId': 'fuzzer_stats',
},
},
datasetId='fuzzer_stats',
projectId='test-clusterfuzz'),
mock.call().execute(),
mock.call(
body={
'timePartitioning': {
'type': 'DAY'
},
'tableReference': {
'projectId': 'test-clusterfuzz',
'tableId': 'TestcaseRun',
'datasetId': 'fuzzer_stats',
},
},
datasetId='fuzzer_stats',
projectId='test-clusterfuzz'),
mock.call().execute(),
])
self.mock_bigquery.jobs().insert.assert_has_calls(
[
mock.call(
body={
'configuration': {
'load': {
'destinationTable': {
'projectId': 'test-clusterfuzz',
'tableId': 'JobRun$20160907',
'datasetId': 'fuzzer_stats'
},
'schemaUpdateOptions': ['ALLOW_FIELD_ADDITION',],
'writeDisposition':
'WRITE_TRUNCATE',
'sourceUris': [
'gs://test-bigquery-bucket/fuzzer/JobRun/date/'
'20160907/*.json'
],
'sourceFormat':
'NEWLINE_DELIMITED_JSON',
'schema': {
'fields': [{
'type': 'INTEGER',
'name': 'testcases_executed',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'build_revision',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'new_crashes',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'job',
'mode': 'NULLABLE'
}, {
'type': 'FLOAT',
'name': 'timestamp',
'mode': 'NULLABLE'
}, {
'fields': [{
'type': 'STRING',
'name': 'crash_type',
'mode': 'NULLABLE'
}, {
'type': 'BOOLEAN',
'name': 'is_new',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'crash_state',
'mode': 'NULLABLE'
}, {
'type': 'BOOLEAN',
'name': 'security_flag',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'count',
'mode': 'NULLABLE'
}],
'type':
'RECORD',
'name':
'crashes',
'mode':
'REPEATED'
}, {
'type': 'INTEGER',
'name': 'known_crashes',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'fuzzer',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'kind',
'mode': 'NULLABLE'
}]
},
}
}
},
projectId='test-clusterfuzz'),
mock.call().execute(),
mock.call(
body={
'configuration': {
'load': {
'destinationTable': {
'projectId': 'test-clusterfuzz',
'tableId': 'TestcaseRun$20160907',
'datasetId': 'fuzzer_stats'
},
'schemaUpdateOptions': ['ALLOW_FIELD_ADDITION',],
'writeDisposition':
'WRITE_TRUNCATE',
'sourceUris': [
'gs://test-bigquery-bucket/fuzzer/TestcaseRun/'
'date/20160907/*.json'
],
'sourceFormat':
'NEWLINE_DELIMITED_JSON',
}
}
},
projectId='test-clusterfuzz'),
mock.call().execute(),
],
# Otherwise we need to mock two calls to mock.call().execute().__str__()
# which does not seem to work well.
any_order=True) | src/clusterfuzz/_internal/tests/appengine/handlers/cron/load_bigquery_stats_test.py | """Tests for load_bigquery_stats."""
import datetime
import unittest
import flask
import mock
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.cron import load_bigquery_stats
@test_utils.with_cloud_emulators('datastore')
class LoadBigQueryStatsTest(unittest.TestCase):
"""Test LoadBigQueryStatsTest."""
def setUp(self):
flaskapp = flask.Flask('testflask')
flaskapp.add_url_rule(
'/load-bigquery-stats',
view_func=load_bigquery_stats.Handler.as_view('/load-bigquery-stats'))
self.app = webtest.TestApp(flaskapp)
data_types.Fuzzer(name='fuzzer', jobs=['job']).put()
data_types.Job(name='job').put()
test_helpers.patch(self, [
'clusterfuzz._internal.google_cloud_utils.big_query.get_api_client',
'handlers.base_handler.Handler.is_cron',
'handlers.cron.load_bigquery_stats.Handler._utc_now',
])
self.mock._utc_now.return_value = datetime.datetime(2016, 9, 8) # pylint: disable=protected-access
self.mock_bigquery = mock.MagicMock()
self.mock.get_api_client.return_value = self.mock_bigquery
def test_execute(self):
"""Tests executing of cron job."""
self.app.get('/load-bigquery-stats')
self.mock_bigquery.datasets().insert.assert_has_calls([
mock.call(
projectId='test-clusterfuzz',
body={
'datasetReference': {
'projectId': 'test-clusterfuzz',
'datasetId': 'fuzzer_stats'
}
}),
mock.call().execute()
])
self.mock_bigquery.tables().insert.assert_has_calls([
mock.call(
body={
'timePartitioning': {
'type': 'DAY'
},
'tableReference': {
'projectId': 'test-clusterfuzz',
'tableId': 'JobRun',
'datasetId': 'fuzzer_stats',
},
},
datasetId='fuzzer_stats',
projectId='test-clusterfuzz'),
mock.call().execute(),
mock.call(
body={
'timePartitioning': {
'type': 'DAY'
},
'tableReference': {
'projectId': 'test-clusterfuzz',
'tableId': 'TestcaseRun',
'datasetId': 'fuzzer_stats',
},
},
datasetId='fuzzer_stats',
projectId='test-clusterfuzz'),
mock.call().execute(),
])
self.mock_bigquery.jobs().insert.assert_has_calls(
[
mock.call(
body={
'configuration': {
'load': {
'destinationTable': {
'projectId': 'test-clusterfuzz',
'tableId': 'JobRun$20160907',
'datasetId': 'fuzzer_stats'
},
'schemaUpdateOptions': ['ALLOW_FIELD_ADDITION',],
'writeDisposition':
'WRITE_TRUNCATE',
'sourceUris': [
'gs://test-bigquery-bucket/fuzzer/JobRun/date/'
'20160907/*.json'
],
'sourceFormat':
'NEWLINE_DELIMITED_JSON',
'schema': {
'fields': [{
'type': 'INTEGER',
'name': 'testcases_executed',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'build_revision',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'new_crashes',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'job',
'mode': 'NULLABLE'
}, {
'type': 'FLOAT',
'name': 'timestamp',
'mode': 'NULLABLE'
}, {
'fields': [{
'type': 'STRING',
'name': 'crash_type',
'mode': 'NULLABLE'
}, {
'type': 'BOOLEAN',
'name': 'is_new',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'crash_state',
'mode': 'NULLABLE'
}, {
'type': 'BOOLEAN',
'name': 'security_flag',
'mode': 'NULLABLE'
}, {
'type': 'INTEGER',
'name': 'count',
'mode': 'NULLABLE'
}],
'type':
'RECORD',
'name':
'crashes',
'mode':
'REPEATED'
}, {
'type': 'INTEGER',
'name': 'known_crashes',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'fuzzer',
'mode': 'NULLABLE'
}, {
'type': 'STRING',
'name': 'kind',
'mode': 'NULLABLE'
}]
},
}
}
},
projectId='test-clusterfuzz'),
mock.call().execute(),
mock.call(
body={
'configuration': {
'load': {
'destinationTable': {
'projectId': 'test-clusterfuzz',
'tableId': 'TestcaseRun$20160907',
'datasetId': 'fuzzer_stats'
},
'schemaUpdateOptions': ['ALLOW_FIELD_ADDITION',],
'writeDisposition':
'WRITE_TRUNCATE',
'sourceUris': [
'gs://test-bigquery-bucket/fuzzer/TestcaseRun/'
'date/20160907/*.json'
],
'sourceFormat':
'NEWLINE_DELIMITED_JSON',
}
}
},
projectId='test-clusterfuzz'),
mock.call().execute(),
],
# Otherwise we need to mock two calls to mock.call().execute().__str__()
# which does not seem to work well.
any_order=True) | 0.560974 | 0.314616 |
from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class VulkanLoaderConan(ConanFile):
    # Conan recipe metadata for the Khronos Vulkan ICD loader.
    name = "vulkan-loader"
    description = "Khronos official Vulkan ICD desktop loader for Windows, Linux, and MacOS."
    topics = ("vulkan", "loader", "desktop", "gpu")
    homepage = "https://github.com/KhronosGroup/Vulkan-Loader"
    url = "https://github.com/conan-io/conan-center-index"
    license = "Apache-2.0"
    settings = "os", "arch", "compiler", "build_type"
    # The WSI (window-system integration) backends are Linux-only knobs;
    # config_options() deletes them on other OSes.
    options = {
        "shared": [True, False],
        "fPIC": [True, False],
        "with_wsi_xcb": [True, False],
        "with_wsi_xlib": [True, False],
        "with_wsi_wayland": [True, False],
        "with_wsi_directfb": [True, False],
    }
    default_options = {
        "shared": True,
        "fPIC": True,
        "with_wsi_xcb": True,
        "with_wsi_xlib": True,
        "with_wsi_wayland": True,
        "with_wsi_directfb": False,
    }
    generators = "cmake", "pkg_config"
    # Cached CMake helper, built lazily by _configure_cmake().
    _cmake = None
@property
def _source_subfolder(self):
    # Folder the upstream sources are unpacked into (see source()).
    return "source_subfolder"
@property
def _is_mingw(self):
    """True when building with MinGW, i.e. gcc targeting Windows."""
    on_windows = self.settings.os == "Windows"
    with_gcc = self.settings.compiler == "gcc"
    return on_windows and with_gcc
def export_sources(self):
    # Ship the wrapper CMakeLists and all version-specific patches with the
    # recipe so builds are reproducible from the recipe revision alone.
    self.copy("CMakeLists.txt")
    for patch in self.conan_data.get("patches", {}).get(self.version, []):
        self.copy(patch["patch_file"])
def config_options(self):
    # fPIC is meaningless on Windows; the WSI backends only exist on Linux.
    if self.settings.os == "Windows":
        del self.options.fPIC
    if self.settings.os != "Linux":
        del self.options.with_wsi_xcb
        del self.options.with_wsi_xlib
        del self.options.with_wsi_wayland
        del self.options.with_wsi_directfb
def configure(self):
    # Shared builds always use PIC, so the option is dropped; the loader is
    # plain C, so C++ settings must not affect the package id.
    if self.options.shared:
        del self.options.fPIC
    del self.settings.compiler.libcxx
    del self.settings.compiler.cppstd
def requirements(self):
    # Headers must match the loader version exactly (enforced in build()).
    self.requires("vulkan-headers/{}".format(self.version))
    if self.options.get_safe("with_wsi_xcb") or self.options.get_safe("with_wsi_xlib"):
        self.requires("xorg/system")
    if self.options.get_safe("with_wsi_wayland"):
        self.requires("wayland/1.20.0")
def validate(self):
    # Fail early for configurations this recipe cannot build.
    if self.options.get_safe("with_wsi_directfb"):
        # TODO: directfb package
        raise ConanInvalidConfiguration("Conan recipe for DirectFB is not available yet.")
    # Static loader builds are only supported on Apple platforms.
    if not tools.is_apple_os(self.settings.os) and not self.options.shared:
        raise ConanInvalidConfiguration("Static builds are not supported on {}".format(self.settings.os))
    if self.settings.compiler == "Visual Studio" and tools.Version(self.settings.compiler.version) < 15:
        # FIXME: It should build but Visual Studio 2015 container in CI of CCI seems to lack some Win SDK headers
        raise ConanInvalidConfiguration("Visual Studio < 2017 not yet supported in this recipe")
def build_requirements(self):
    """pkgconf locates any enabled WSI backend; MinGW builds additionally
    need JWasm to assemble the loader's assembly sources."""
    wsi_flags = ("with_wsi_xcb", "with_wsi_xlib",
                 "with_wsi_wayland", "with_wsi_directfb")
    if any(self.options.get_safe(flag) for flag in wsi_flags):
        self.build_requires("pkgconf/1.7.4")
    if self._is_mingw:
        self.build_requires("jwasm/2.13")
def source(self):
    # Fetch and unpack the upstream tarball into source_subfolder.
    tools.get(**self.conan_data["sources"][self.version],
              destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
    # Apply version-specific patches shipped with the recipe.
    for patch in self.conan_data.get("patches", {}).get(self.version, []):
        tools.patch(**patch)
    # Conan packages install the Vulkan registry under res/, not share/.
    tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "FindVulkanHeaders.cmake"),
                          "HINTS ${VULKAN_HEADERS_INSTALL_DIR}/share/vulkan/registry",
                          "HINTS ${VULKAN_HEADERS_INSTALL_DIR}/res/vulkan/registry")
    # Honor settings.compiler.runtime
    tools.replace_in_file(os.path.join(self._source_subfolder, "loader", "CMakeLists.txt"),
                          "if(${configuration} MATCHES \"/MD\")",
                          "if(FALSE)")
def _configure_cmake(self):
    # Lazily build and memoize the CMake configuration in self._cmake.
    if self._cmake:
        return self._cmake
    self._cmake = CMake(self)
    self._cmake.definitions["VULKAN_HEADERS_INSTALL_DIR"] = self.deps_cpp_info["vulkan-headers"].rootpath
    self._cmake.definitions["BUILD_TESTS"] = False
    self._cmake.definitions["USE_CCACHE"] = False
    # WSI backend toggles exist only on Linux (see config_options()).
    if self.settings.os == "Linux":
        self._cmake.definitions["BUILD_WSI_XCB_SUPPORT"] = self.options.with_wsi_xcb
        self._cmake.definitions["BUILD_WSI_XLIB_SUPPORT"] = self.options.with_wsi_xlib
        self._cmake.definitions["BUILD_WSI_WAYLAND_SUPPORT"] = self.options.with_wsi_wayland
        self._cmake.definitions["BUILD_WSI_DIRECTFB_SUPPORT"] = self.options.with_wsi_directfb
    if self.settings.os == "Windows":
        self._cmake.definitions["ENABLE_WIN10_ONECORE"] = False
    # A static loader is only possible on Apple platforms (see validate()).
    if tools.is_apple_os(self.settings.os):
        self._cmake.definitions["BUILD_STATIC_LOADER"] = not self.options.shared
    self._cmake.definitions["BUILD_LOADER"] = True
    if self.settings.os == "Windows":
        self._cmake.definitions["USE_MASM"] = True
    self._cmake.configure()
    return self._cmake
def build(self):
    # Loader and headers must be version-locked or entry points may be missing.
    if self.deps_cpp_info["vulkan-headers"].version != self.version:
        raise ConanInvalidConfiguration("vulkan-loader must be built with the same version than vulkan-headers.")
    self._patch_sources()
    cmake = self._configure_cmake()
    cmake.build()
def package(self):
    cmake = self._configure_cmake()
    cmake.install()
    self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder)
    # Drop artifacts conan models itself (pkg-config files) or does not need.
    tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
    tools.rmdir(os.path.join(self.package_folder, "loader"))
def package_info(self):
    # Consumers may still link a mismatched headers version — warn, don't fail.
    if self.deps_cpp_info["vulkan-headers"].version != self.version:
        self.output.warn("vulkan-headers version is different than vulkan-loader. Several symbols might be missing.")
    self.cpp_info.names["cmake_find_package"] = "Vulkan"
    self.cpp_info.names["cmake_find_package_multi"] = "Vulkan"
    self.cpp_info.names["pkg_config"] = "vulkan"
    # The Windows import library is vulkan-1.lib; elsewhere it is libvulkan.
    suffix = "-1" if self.settings.os == "Windows" else ""
    self.cpp_info.libs = ["vulkan" + suffix]
    self.cpp_info.includedirs = self.deps_cpp_info["vulkan-headers"].include_paths  # allow to properly set Vulkan_INCLUDE_DIRS in cmake_find_package(_multi) generators
    if self.settings.os == "Linux":
        self.cpp_info.system_libs = ["dl", "pthread", "m"]
    elif self.settings.os == "Macos":
        self.cpp_info.frameworks = ["CoreFoundation"]
    vulkan_sdk_path = self.package_folder
    self.output.info("Create VULKAN_SDK environment variable: {}".format(vulkan_sdk_path))
self.env_info.VULKAN_SDK = vulkan_sdk_path | recipes/vulkan-loader/all/conanfile.py | from conans import ConanFile, tools, CMake
from conans.errors import ConanInvalidConfiguration
import os
required_conan_version = ">=1.33.0"
class VulkanLoaderConan(ConanFile):
name = "vulkan-loader"
description = "Khronos official Vulkan ICD desktop loader for Windows, Linux, and MacOS."
topics = ("vulkan", "loader", "desktop", "gpu")
homepage = "https://github.com/KhronosGroup/Vulkan-Loader"
url = "https://github.com/conan-io/conan-center-index"
license = "Apache-2.0"
settings = "os", "arch", "compiler", "build_type"
options = {
"shared": [True, False],
"fPIC": [True, False],
"with_wsi_xcb": [True, False],
"with_wsi_xlib": [True, False],
"with_wsi_wayland": [True, False],
"with_wsi_directfb": [True, False],
}
default_options = {
"shared": True,
"fPIC": True,
"with_wsi_xcb": True,
"with_wsi_xlib": True,
"with_wsi_wayland": True,
"with_wsi_directfb": False,
}
generators = "cmake", "pkg_config"
_cmake = None
@property
def _source_subfolder(self):
return "source_subfolder"
@property
def _is_mingw(self):
return self.settings.os == "Windows" and self.settings.compiler == "gcc"
def export_sources(self):
self.copy("CMakeLists.txt")
for patch in self.conan_data.get("patches", {}).get(self.version, []):
self.copy(patch["patch_file"])
def config_options(self):
if self.settings.os == "Windows":
del self.options.fPIC
if self.settings.os != "Linux":
del self.options.with_wsi_xcb
del self.options.with_wsi_xlib
del self.options.with_wsi_wayland
del self.options.with_wsi_directfb
def configure(self):
if self.options.shared:
del self.options.fPIC
del self.settings.compiler.libcxx
del self.settings.compiler.cppstd
def requirements(self):
self.requires("vulkan-headers/{}".format(self.version))
if self.options.get_safe("with_wsi_xcb") or self.options.get_safe("with_wsi_xlib"):
self.requires("xorg/system")
if self.options.get_safe("with_wsi_wayland"):
self.requires("wayland/1.20.0")
def validate(self):
if self.options.get_safe("with_wsi_directfb"):
# TODO: directfb package
raise ConanInvalidConfiguration("Conan recipe for DirectFB is not available yet.")
if not tools.is_apple_os(self.settings.os) and not self.options.shared:
raise ConanInvalidConfiguration("Static builds are not supported on {}".format(self.settings.os))
if self.settings.compiler == "Visual Studio" and tools.Version(self.settings.compiler.version) < 15:
# FIXME: It should build but Visual Studio 2015 container in CI of CCI seems to lack some Win SDK headers
raise ConanInvalidConfiguration("Visual Studio < 2017 not yet supported in this recipe")
def build_requirements(self):
if self.options.get_safe("with_wsi_xcb") or self.options.get_safe("with_wsi_xlib") or \
self.options.get_safe("with_wsi_wayland") or self.options.get_safe("with_wsi_directfb"):
self.build_requires("pkgconf/1.7.4")
if self._is_mingw:
self.build_requires("jwasm/2.13")
def source(self):
tools.get(**self.conan_data["sources"][self.version],
destination=self._source_subfolder, strip_root=True)
def _patch_sources(self):
for patch in self.conan_data.get("patches", {}).get(self.version, []):
tools.patch(**patch)
tools.replace_in_file(os.path.join(self._source_subfolder, "cmake", "FindVulkanHeaders.cmake"),
"HINTS ${VULKAN_HEADERS_INSTALL_DIR}/share/vulkan/registry",
"HINTS ${VULKAN_HEADERS_INSTALL_DIR}/res/vulkan/registry")
# Honor settings.compiler.runtime
tools.replace_in_file(os.path.join(self._source_subfolder, "loader", "CMakeLists.txt"),
"if(${configuration} MATCHES \"/MD\")",
"if(FALSE)")
def _configure_cmake(self):
if self._cmake:
return self._cmake
self._cmake = CMake(self)
self._cmake.definitions["VULKAN_HEADERS_INSTALL_DIR"] = self.deps_cpp_info["vulkan-headers"].rootpath
self._cmake.definitions["BUILD_TESTS"] = False
self._cmake.definitions["USE_CCACHE"] = False
if self.settings.os == "Linux":
self._cmake.definitions["BUILD_WSI_XCB_SUPPORT"] = self.options.with_wsi_xcb
self._cmake.definitions["BUILD_WSI_XLIB_SUPPORT"] = self.options.with_wsi_xlib
self._cmake.definitions["BUILD_WSI_WAYLAND_SUPPORT"] = self.options.with_wsi_wayland
self._cmake.definitions["BUILD_WSI_DIRECTFB_SUPPORT"] = self.options.with_wsi_directfb
if self.settings.os == "Windows":
self._cmake.definitions["ENABLE_WIN10_ONECORE"] = False
if tools.is_apple_os(self.settings.os):
self._cmake.definitions["BUILD_STATIC_LOADER"] = not self.options.shared
self._cmake.definitions["BUILD_LOADER"] = True
if self.settings.os == "Windows":
self._cmake.definitions["USE_MASM"] = True
self._cmake.configure()
return self._cmake
def build(self):
if self.deps_cpp_info["vulkan-headers"].version != self.version:
raise ConanInvalidConfiguration("vulkan-loader must be built with the same version than vulkan-headers.")
self._patch_sources()
cmake = self._configure_cmake()
cmake.build()
def package(self):
cmake = self._configure_cmake()
cmake.install()
self.copy("LICENSE.txt", dst="licenses", src=self._source_subfolder)
tools.rmdir(os.path.join(self.package_folder, "lib", "pkgconfig"))
tools.rmdir(os.path.join(self.package_folder, "loader"))
def package_info(self):
if self.deps_cpp_info["vulkan-headers"].version != self.version:
self.output.warn("vulkan-headers version is different than vulkan-loader. Several symbols might be missing.")
self.cpp_info.names["cmake_find_package"] = "Vulkan"
self.cpp_info.names["cmake_find_package_multi"] = "Vulkan"
self.cpp_info.names["pkg_config"] = "vulkan"
suffix = "-1" if self.settings.os == "Windows" else ""
self.cpp_info.libs = ["vulkan" + suffix]
self.cpp_info.includedirs = self.deps_cpp_info["vulkan-headers"].include_paths # allow to properly set Vulkan_INCLUDE_DIRS in cmake_find_package(_multi) generators
if self.settings.os == "Linux":
self.cpp_info.system_libs = ["dl", "pthread", "m"]
elif self.settings.os == "Macos":
self.cpp_info.frameworks = ["CoreFoundation"]
vulkan_sdk_path = self.package_folder
self.output.info("Create VULKAN_SDK environment variable: {}".format(vulkan_sdk_path))
self.env_info.VULKAN_SDK = vulkan_sdk_path | 0.297878 | 0.133669 |
import argparse
import os
import pickle
import GPUtil
import numpy as np
from rte_pac.utils.data_reader import embed_data_set_with_glove_2, load_feature_by_data_set, \
number_feature, generate_concat_indices_for_inter_evidence, generate_concat_indices_for_claim
from rte_pac.utils.estimator_definitions import get_estimator
from rte_pac.utils.score import print_metrics
from rte_pac.utils.text_processing import load_whole_glove, vocab_map
from utils.config import Config
from common.util.log_helper import LogHelper
from scripts import save_model, load_model, generate_submission, RTERunPhase
from scripts.models.rte_bert_sent import main as main_bert_sent
from scripts.models.rte_bert_word import main as main_bert_word
from scripts.models.rte_credibility import main as main_credibility
from scripts.models.rte_credibility_mtl import main as main_credibility_mtl
from scripts.models.rte_esim_elmo import main as main_esim_elmo
from scripts.models.rte_esim_with_paths import main as main_paths
from scripts.models.rte_esim_with_scores import main as main_scores
from scripts.models.rte_fasttext import main as main_fasttext
from scripts.models.rte_mtl import main as main_mtl
from scripts.models.rte_use import main as main_use
def main(mode: RTERunPhase, config=None, estimator=None):
LogHelper.setup()
logger = LogHelper.get_logger(os.path.splitext(os.path.basename(__file__))[0] + "_" + str(mode))
if config is not None and isinstance(config, str):
logger.info("model: " + str(mode) + ", config: " + str(config))
Config.load_config(config)
if hasattr(Config, 'use_inter_evidence_comparison'):
use_inter_evidence_comparison = Config.use_inter_evidence_comparison
else:
use_inter_evidence_comparison = False
# 'esim_inter_evidence' model and 'esim_inter_evidence_claim_evidences_comparison' models need inter evidence inputs
use_inter_evidence_comparison = use_inter_evidence_comparison or Config.estimator_name in {'esim_inter_evidence',
'esim_inter_evidence_claim_evidences_comparison'}
if hasattr(Config, 'use_claim_evidences_comparison'):
use_claim_evidences_comparison = Config.use_claim_evidences_comparison
else:
use_claim_evidences_comparison = False
# 'esim_inter_evidence_claim_evidences_comparison' model needs claim-evidence inputs
use_claim_evidences_comparison = use_claim_evidences_comparison or Config.estimator_name in {
'esim_inter_evidence_claim_evidences_comparison'}
if hasattr(Config, 'use_extra_features'):
use_extra_features = Config.use_extra_features
else:
use_extra_features = False
if hasattr(Config, 'use_numeric_feature'):
use_numeric_feature = Config.use_numeric_feature
else:
use_numeric_feature = False
# 'esim_num_feature' model needs numeric feature inputs
use_numeric_feature = use_numeric_feature or Config.estimator_name in {'esim_num_feature'}
if hasattr(Config, 'is_snopes'):
is_snopes = Config.is_snopes
else:
is_snopes = False
logger.debug("is_snopes: " + str(is_snopes))
logger.info("scorer type: " + Config.estimator_name)
logger.info("random seed: " + str(Config.seed))
logger.info("ESIM arguments: " + str(Config.esim_end_2_end_hyper_param))
logger.info("use_inter_sentence_comparison: " + str(use_inter_evidence_comparison))
logger.info("use_extra_features: " + str(use_extra_features))
logger.info("use_numeric_feature: " + str(use_numeric_feature))
logger.info("use_claim_evidences_comparison: " + str(use_claim_evidences_comparison))
if mode == RTERunPhase.train:
# # training mode
if hasattr(Config, 'training_dump') and os.path.exists(Config.training_dump):
with open(Config.training_dump, 'rb') as f:
(X_dict, y_train) = pickle.load(f)
else:
training_set, vocab, embeddings, _, _ = embed_data_set_with_glove_2(Config.training_set_file,
Config.db_path,
glove_path=Config.glove_path,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = training_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
training_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
training_set['data']['h_sizes'] = h_sizes
training_set['data']['h_np'] = np.expand_dims(training_set['data']['h_np'], 1)
valid_set, _, _, _, _ = embed_data_set_with_glove_2(Config.dev_set_file, Config.db_path,
vocab_dict=vocab, glove_embeddings=embeddings,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = valid_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
valid_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
valid_set['data']['h_sizes'] = h_sizes
valid_set['data']['h_np'] = np.expand_dims(valid_set['data']['h_np'], 1)
if use_extra_features:
assert hasattr(Config, 'feature_path'), "Config should has feature_path if Config.use_feature is True"
training_claim_features, training_evidence_features = load_feature_by_data_set(Config.training_set_file,
Config.feature_path,
Config.max_sentences)
valid_claim_features, valid_evidence_features = load_feature_by_data_set(Config.dev_set_file,
Config.feature_path,
Config.max_sentences)
training_set['data']['h_feats'] = training_claim_features
training_set['data']['b_feats'] = training_evidence_features
valid_set['data']['h_feats'] = valid_claim_features
valid_set['data']['b_feats'] = valid_evidence_features
if use_numeric_feature:
training_num_feat = number_feature(Config.training_set_file, Config.db_path, Config.max_sentences,
is_snopes)
valid_num_feat = number_feature(Config.dev_set_file, Config.db_path, Config.max_sentences, is_snopes)
training_set['data']['num_feat'] = training_num_feat
valid_set['data']['num_feat'] = valid_num_feat
if use_inter_evidence_comparison:
training_concat_sent_indices, training_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
training_set['data']['b_np'],
training_set['data']['b_sent_sizes'],
Config.max_sentence_size, Config.max_sentences)
training_set['data']['b_concat_indices'] = training_concat_sent_indices
training_set['data']['b_concat_sizes'] = training_concat_sent_sizes
valid_concat_sent_indices, valid_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
valid_set['data']['b_np'],
valid_set['data'][
'b_sent_sizes'],
Config.max_sentence_size,
Config.max_sentences)
valid_set['data']['b_concat_indices'] = valid_concat_sent_indices
valid_set['data']['b_concat_sizes'] = valid_concat_sent_sizes
if use_claim_evidences_comparison:
training_all_evidences_indices, training_all_evidences_sizes = generate_concat_indices_for_claim(
training_set['data']['b_np'], training_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
training_set['data']['b_concat_indices_for_h'] = training_all_evidences_indices
training_set['data']['b_concat_sizes_for_h'] = training_all_evidences_sizes
valid_all_evidences_indices, valid_all_evidences_sizes = generate_concat_indices_for_claim(
valid_set['data']['b_np'], valid_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
valid_set['data']['b_concat_indices_for_h'] = valid_all_evidences_indices
valid_set['data']['b_concat_sizes_for_h'] = valid_all_evidences_sizes
X_dict = {
'X_train': training_set['data'],
'X_valid': valid_set['data'],
'y_valid': valid_set['label'],
'embedding': embeddings
}
y_train = training_set['label']
if hasattr(Config, 'training_dump'):
with open(Config.training_dump, 'wb') as f:
pickle.dump((X_dict, y_train), f, protocol=pickle.HIGHEST_PROTOCOL)
if estimator is None:
estimator = get_estimator(Config.estimator_name, Config.ckpt_folder)
if 'CUDA_VISIBLE_DEVICES' not in os.environ or not str(os.environ['CUDA_VISIBLE_DEVICES']).strip():
os.environ['CUDA_VISIBLE_DEVICES'] = str(
GPUtil.getFirstAvailable(maxLoad=1.0, maxMemory=1.0 - Config.max_gpu_memory)[0])
estimator.fit(X_dict, y_train)
save_model(estimator, Config.model_folder, Config.pickle_name, logger)
else:
# testing mode
restore_param_required = estimator is None
if estimator is None:
estimator = load_model(Config.model_folder, Config.pickle_name)
if estimator is None:
estimator = get_estimator(Config.estimator_name, Config.ckpt_folder)
vocab, embeddings = load_whole_glove(Config.glove_path)
vocab = vocab_map(vocab)
test_set, _, _, _, _ = embed_data_set_with_glove_2(Config.test_set_file, Config.db_path, vocab_dict=vocab,
glove_embeddings=embeddings,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = test_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
test_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
test_set['data']['h_sizes'] = h_sizes
test_set['data']['h_np'] = np.expand_dims(test_set['data']['h_np'], 1)
if use_extra_features:
assert hasattr(Config, 'feature_path'), "Config should has feature_path if Config.use_feature is True"
test_claim_features, test_evidence_features = load_feature_by_data_set(Config.test_set_file,
Config.feature_path,
Config.max_sentences)
test_set['data']['h_feats'] = test_claim_features
test_set['data']['b_feats'] = test_evidence_features
if use_numeric_feature:
test_num_feat = number_feature(Config.test_set_file, Config.db_path, Config.max_sentences, is_snopes)
test_set['data']['num_feat'] = test_num_feat
x_dict = {
'X_test': test_set['data'],
'embedding': embeddings
}
if use_inter_evidence_comparison:
test_concat_sent_indices, test_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
test_set['data']['b_np'],
test_set['data']['b_sent_sizes'],
Config.max_sentence_size,
Config.max_sentences)
test_set['data']['b_concat_indices'] = test_concat_sent_indices
test_set['data']['b_concat_sizes'] = test_concat_sent_sizes
if use_claim_evidences_comparison:
test_all_evidences_indices, test_all_evidences_sizes = generate_concat_indices_for_claim(
test_set['data']['b_np'], test_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
test_set['data']['b_concat_indices_for_h'] = test_all_evidences_indices
test_set['data']['b_concat_sizes_for_h'] = test_all_evidences_sizes
if 'CUDA_VISIBLE_DEVICES' not in os.environ or not str(os.environ['CUDA_VISIBLE_DEVICES']).strip():
os.environ['CUDA_VISIBLE_DEVICES'] = str(
GPUtil.getFirstAvailable(maxLoad=1.0, maxMemory=1.0 - Config.max_gpu_memory)[0])
predictions = estimator.predict(x_dict, restore_param_required=restore_param_required)
generate_submission(predictions, test_set['id'], Config.test_set_file, Config.submission_file)
if 'label' in test_set:
print_metrics(test_set['label'], predictions, logger)
return estimator
def entrance(mode: RTERunPhase, config=None, estimator=None):
if config is not None:
Config.load_config(config)
if Config.estimator_name in {'esim', 'esim_no_attention', 'esim_fasttext', 'esim_fasttext_no_attention',
'voting_esim_hard', 'voting_esim_soft', 'han_fasttext'}:
return main_fasttext(mode, estimator=estimator)
elif Config.estimator_name == 'esim_mtl':
return main_mtl(mode, estimator=estimator)
elif Config.estimator_name in {'credibility_soft_voting', 'credibility'}:
return main_credibility(mode, estimator=estimator)
elif Config.estimator_name == 'credibility_mtl':
return main_credibility_mtl(mode, estimator=estimator)
elif Config.estimator_name in {'bert_sent_attention', 'bert_sent_bilstm'}:
return main_bert_sent(mode, estimator=estimator)
elif Config.estimator_name in {'bert_word_bilstm'}:
return main_bert_word(mode, estimator=estimator)
elif Config.estimator_name == 'esim_elmo':
return main_esim_elmo(mode, estimator=estimator)
elif Config.estimator_name in {'use_attention', 'use_bilstm'}:
return main_use(mode, estimator=estimator)
elif Config.estimator_name == 'esim_glove_scores_attention':
return main_scores(mode, estimator=estimator)
elif Config.estimator_name == 'esim_paths':
return main_paths(mode, estimator=estimator)
else:
return main(mode, estimator=estimator)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=RTERunPhase.from_string, help='\'train\' or \'test\'', choices=list(RTERunPhase),
required=True)
parser.add_argument('--config', help='/path/to/config/file, in JSON format')
args = parser.parse_args()
entrance(args.mode, args.config) | src/scripts/rte.py | import argparse
import os
import pickle
import GPUtil
import numpy as np
from rte_pac.utils.data_reader import embed_data_set_with_glove_2, load_feature_by_data_set, \
number_feature, generate_concat_indices_for_inter_evidence, generate_concat_indices_for_claim
from rte_pac.utils.estimator_definitions import get_estimator
from rte_pac.utils.score import print_metrics
from rte_pac.utils.text_processing import load_whole_glove, vocab_map
from utils.config import Config
from common.util.log_helper import LogHelper
from scripts import save_model, load_model, generate_submission, RTERunPhase
from scripts.models.rte_bert_sent import main as main_bert_sent
from scripts.models.rte_bert_word import main as main_bert_word
from scripts.models.rte_credibility import main as main_credibility
from scripts.models.rte_credibility_mtl import main as main_credibility_mtl
from scripts.models.rte_esim_elmo import main as main_esim_elmo
from scripts.models.rte_esim_with_paths import main as main_paths
from scripts.models.rte_esim_with_scores import main as main_scores
from scripts.models.rte_fasttext import main as main_fasttext
from scripts.models.rte_mtl import main as main_mtl
from scripts.models.rte_use import main as main_use
def main(mode: RTERunPhase, config=None, estimator=None):
LogHelper.setup()
logger = LogHelper.get_logger(os.path.splitext(os.path.basename(__file__))[0] + "_" + str(mode))
if config is not None and isinstance(config, str):
logger.info("model: " + str(mode) + ", config: " + str(config))
Config.load_config(config)
if hasattr(Config, 'use_inter_evidence_comparison'):
use_inter_evidence_comparison = Config.use_inter_evidence_comparison
else:
use_inter_evidence_comparison = False
# 'esim_inter_evidence' model and 'esim_inter_evidence_claim_evidences_comparison' models need inter evidence inputs
use_inter_evidence_comparison = use_inter_evidence_comparison or Config.estimator_name in {'esim_inter_evidence',
'esim_inter_evidence_claim_evidences_comparison'}
if hasattr(Config, 'use_claim_evidences_comparison'):
use_claim_evidences_comparison = Config.use_claim_evidences_comparison
else:
use_claim_evidences_comparison = False
# 'esim_inter_evidence_claim_evidences_comparison' model needs claim-evidence inputs
use_claim_evidences_comparison = use_claim_evidences_comparison or Config.estimator_name in {
'esim_inter_evidence_claim_evidences_comparison'}
if hasattr(Config, 'use_extra_features'):
use_extra_features = Config.use_extra_features
else:
use_extra_features = False
if hasattr(Config, 'use_numeric_feature'):
use_numeric_feature = Config.use_numeric_feature
else:
use_numeric_feature = False
# 'esim_num_feature' model needs numeric feature inputs
use_numeric_feature = use_numeric_feature or Config.estimator_name in {'esim_num_feature'}
if hasattr(Config, 'is_snopes'):
is_snopes = Config.is_snopes
else:
is_snopes = False
logger.debug("is_snopes: " + str(is_snopes))
logger.info("scorer type: " + Config.estimator_name)
logger.info("random seed: " + str(Config.seed))
logger.info("ESIM arguments: " + str(Config.esim_end_2_end_hyper_param))
logger.info("use_inter_sentence_comparison: " + str(use_inter_evidence_comparison))
logger.info("use_extra_features: " + str(use_extra_features))
logger.info("use_numeric_feature: " + str(use_numeric_feature))
logger.info("use_claim_evidences_comparison: " + str(use_claim_evidences_comparison))
if mode == RTERunPhase.train:
# # training mode
if hasattr(Config, 'training_dump') and os.path.exists(Config.training_dump):
with open(Config.training_dump, 'rb') as f:
(X_dict, y_train) = pickle.load(f)
else:
training_set, vocab, embeddings, _, _ = embed_data_set_with_glove_2(Config.training_set_file,
Config.db_path,
glove_path=Config.glove_path,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = training_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
training_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
training_set['data']['h_sizes'] = h_sizes
training_set['data']['h_np'] = np.expand_dims(training_set['data']['h_np'], 1)
valid_set, _, _, _, _ = embed_data_set_with_glove_2(Config.dev_set_file, Config.db_path,
vocab_dict=vocab, glove_embeddings=embeddings,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = valid_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
valid_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
valid_set['data']['h_sizes'] = h_sizes
valid_set['data']['h_np'] = np.expand_dims(valid_set['data']['h_np'], 1)
if use_extra_features:
assert hasattr(Config, 'feature_path'), "Config should has feature_path if Config.use_feature is True"
training_claim_features, training_evidence_features = load_feature_by_data_set(Config.training_set_file,
Config.feature_path,
Config.max_sentences)
valid_claim_features, valid_evidence_features = load_feature_by_data_set(Config.dev_set_file,
Config.feature_path,
Config.max_sentences)
training_set['data']['h_feats'] = training_claim_features
training_set['data']['b_feats'] = training_evidence_features
valid_set['data']['h_feats'] = valid_claim_features
valid_set['data']['b_feats'] = valid_evidence_features
if use_numeric_feature:
training_num_feat = number_feature(Config.training_set_file, Config.db_path, Config.max_sentences,
is_snopes)
valid_num_feat = number_feature(Config.dev_set_file, Config.db_path, Config.max_sentences, is_snopes)
training_set['data']['num_feat'] = training_num_feat
valid_set['data']['num_feat'] = valid_num_feat
if use_inter_evidence_comparison:
training_concat_sent_indices, training_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
training_set['data']['b_np'],
training_set['data']['b_sent_sizes'],
Config.max_sentence_size, Config.max_sentences)
training_set['data']['b_concat_indices'] = training_concat_sent_indices
training_set['data']['b_concat_sizes'] = training_concat_sent_sizes
valid_concat_sent_indices, valid_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
valid_set['data']['b_np'],
valid_set['data'][
'b_sent_sizes'],
Config.max_sentence_size,
Config.max_sentences)
valid_set['data']['b_concat_indices'] = valid_concat_sent_indices
valid_set['data']['b_concat_sizes'] = valid_concat_sent_sizes
if use_claim_evidences_comparison:
training_all_evidences_indices, training_all_evidences_sizes = generate_concat_indices_for_claim(
training_set['data']['b_np'], training_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
training_set['data']['b_concat_indices_for_h'] = training_all_evidences_indices
training_set['data']['b_concat_sizes_for_h'] = training_all_evidences_sizes
valid_all_evidences_indices, valid_all_evidences_sizes = generate_concat_indices_for_claim(
valid_set['data']['b_np'], valid_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
valid_set['data']['b_concat_indices_for_h'] = valid_all_evidences_indices
valid_set['data']['b_concat_sizes_for_h'] = valid_all_evidences_sizes
X_dict = {
'X_train': training_set['data'],
'X_valid': valid_set['data'],
'y_valid': valid_set['label'],
'embedding': embeddings
}
y_train = training_set['label']
if hasattr(Config, 'training_dump'):
with open(Config.training_dump, 'wb') as f:
pickle.dump((X_dict, y_train), f, protocol=pickle.HIGHEST_PROTOCOL)
if estimator is None:
estimator = get_estimator(Config.estimator_name, Config.ckpt_folder)
if 'CUDA_VISIBLE_DEVICES' not in os.environ or not str(os.environ['CUDA_VISIBLE_DEVICES']).strip():
os.environ['CUDA_VISIBLE_DEVICES'] = str(
GPUtil.getFirstAvailable(maxLoad=1.0, maxMemory=1.0 - Config.max_gpu_memory)[0])
estimator.fit(X_dict, y_train)
save_model(estimator, Config.model_folder, Config.pickle_name, logger)
else:
# testing mode
restore_param_required = estimator is None
if estimator is None:
estimator = load_model(Config.model_folder, Config.pickle_name)
if estimator is None:
estimator = get_estimator(Config.estimator_name, Config.ckpt_folder)
vocab, embeddings = load_whole_glove(Config.glove_path)
vocab = vocab_map(vocab)
test_set, _, _, _, _ = embed_data_set_with_glove_2(Config.test_set_file, Config.db_path, vocab_dict=vocab,
glove_embeddings=embeddings,
threshold_b_sent_num=Config.max_sentences,
threshold_b_sent_size=Config.max_sentence_size,
threshold_h_sent_size=Config.max_claim_size,
is_snopes=is_snopes)
h_sent_sizes = test_set['data']['h_sent_sizes']
h_sizes = np.ones(len(h_sent_sizes), np.int32)
test_set['data']['h_sent_sizes'] = np.expand_dims(h_sent_sizes, 1)
test_set['data']['h_sizes'] = h_sizes
test_set['data']['h_np'] = np.expand_dims(test_set['data']['h_np'], 1)
if use_extra_features:
assert hasattr(Config, 'feature_path'), "Config should has feature_path if Config.use_feature is True"
test_claim_features, test_evidence_features = load_feature_by_data_set(Config.test_set_file,
Config.feature_path,
Config.max_sentences)
test_set['data']['h_feats'] = test_claim_features
test_set['data']['b_feats'] = test_evidence_features
if use_numeric_feature:
test_num_feat = number_feature(Config.test_set_file, Config.db_path, Config.max_sentences, is_snopes)
test_set['data']['num_feat'] = test_num_feat
x_dict = {
'X_test': test_set['data'],
'embedding': embeddings
}
if use_inter_evidence_comparison:
test_concat_sent_indices, test_concat_sent_sizes = generate_concat_indices_for_inter_evidence(
test_set['data']['b_np'],
test_set['data']['b_sent_sizes'],
Config.max_sentence_size,
Config.max_sentences)
test_set['data']['b_concat_indices'] = test_concat_sent_indices
test_set['data']['b_concat_sizes'] = test_concat_sent_sizes
if use_claim_evidences_comparison:
test_all_evidences_indices, test_all_evidences_sizes = generate_concat_indices_for_claim(
test_set['data']['b_np'], test_set['data']['b_sent_sizes'], Config.max_sentence_size,
Config.max_sentences)
test_set['data']['b_concat_indices_for_h'] = test_all_evidences_indices
test_set['data']['b_concat_sizes_for_h'] = test_all_evidences_sizes
if 'CUDA_VISIBLE_DEVICES' not in os.environ or not str(os.environ['CUDA_VISIBLE_DEVICES']).strip():
os.environ['CUDA_VISIBLE_DEVICES'] = str(
GPUtil.getFirstAvailable(maxLoad=1.0, maxMemory=1.0 - Config.max_gpu_memory)[0])
predictions = estimator.predict(x_dict, restore_param_required=restore_param_required)
generate_submission(predictions, test_set['id'], Config.test_set_file, Config.submission_file)
if 'label' in test_set:
print_metrics(test_set['label'], predictions, logger)
return estimator
def entrance(mode: RTERunPhase, config=None, estimator=None):
if config is not None:
Config.load_config(config)
if Config.estimator_name in {'esim', 'esim_no_attention', 'esim_fasttext', 'esim_fasttext_no_attention',
'voting_esim_hard', 'voting_esim_soft', 'han_fasttext'}:
return main_fasttext(mode, estimator=estimator)
elif Config.estimator_name == 'esim_mtl':
return main_mtl(mode, estimator=estimator)
elif Config.estimator_name in {'credibility_soft_voting', 'credibility'}:
return main_credibility(mode, estimator=estimator)
elif Config.estimator_name == 'credibility_mtl':
return main_credibility_mtl(mode, estimator=estimator)
elif Config.estimator_name in {'bert_sent_attention', 'bert_sent_bilstm'}:
return main_bert_sent(mode, estimator=estimator)
elif Config.estimator_name in {'bert_word_bilstm'}:
return main_bert_word(mode, estimator=estimator)
elif Config.estimator_name == 'esim_elmo':
return main_esim_elmo(mode, estimator=estimator)
elif Config.estimator_name in {'use_attention', 'use_bilstm'}:
return main_use(mode, estimator=estimator)
elif Config.estimator_name == 'esim_glove_scores_attention':
return main_scores(mode, estimator=estimator)
elif Config.estimator_name == 'esim_paths':
return main_paths(mode, estimator=estimator)
else:
return main(mode, estimator=estimator)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--mode', type=RTERunPhase.from_string, help='\'train\' or \'test\'', choices=list(RTERunPhase),
required=True)
parser.add_argument('--config', help='/path/to/config/file, in JSON format')
args = parser.parse_args()
entrance(args.mode, args.config) | 0.347426 | 0.09236 |
import random
import pytest
import networkx as nx
import pandas as pd
from stellargraph.mapper import DirectedGraphSAGENodeGenerator
from stellargraph.core.graph import StellarGraph, StellarDiGraph
def create_simple_graph():
    """
    Build a small directed graph for testing; node ids are integers.

    The graph is the two-edge chain 1 -> 2 -> 3. Every node carries a single
    feature "f0" equal to minus its id, so sampled nodes can be recognised by
    their feature value in the tests.

    Returns:
        A small, directed graph with 3 nodes and 2 edges in StellarDiGraph format.
    """
    digraph = nx.DiGraph()
    digraph.add_edge(1, 2)
    digraph.add_edge(2, 3)
    node_ids = list(digraph.nodes())
    feature_df = pd.DataFrame(
        {"id": node_ids, "f0": [-1.0 * n for n in node_ids]}
    ).set_index("id")
    return StellarDiGraph(digraph, node_features=feature_df)
class TestDirectedNodeGenerator(object):
    """
    Test various aspects of the directed GraphSAGE node generator, with the focus
    on the sampled neighbourhoods and the extracted features.
    """
    def sample_one_hop(self, num_in_samples, num_out_samples):
        """
        Run one-hop sampling over the chain graph 1 -> 2 -> 3 and verify the
        shape and feature values of the sampled (self, in, out) feature tree.

        Args:
            num_in_samples: Number of in-neighbours to sample per node.
            num_out_samples: Number of out-neighbours to sample per node.
        """
        g = create_simple_graph()
        nodes = list(g.nodes())
        in_samples = [num_in_samples]
        out_samples = [num_out_samples]
        # Batch size is the full node count, so one batch covers the graph.
        gen = DirectedGraphSAGENodeGenerator(g, len(g), in_samples, out_samples)
        flow = gen.flow(node_ids=nodes, shuffle=False)
        # Obtain tree of sampled features
        features = gen.sample_features(nodes)
        num_hops = len(in_samples)
        # The in/out branches per hop form a full binary tree of 2^(h+1)-1 slots.
        tree_len = 2 ** (num_hops + 1) - 1
        assert len(features) == tree_len
        # Check node features
        node_features = features[0]
        assert len(node_features) == len(nodes)
        assert node_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            # Head-node feature f0 is -node_id by construction of the test graph.
            assert node_features[idx, 0, 0] == -1.0 * node
        # Check in-node features
        in_features = features[1]
        assert in_features.shape == (len(nodes), in_samples[0], 1)
        for n_idx in range(in_samples[0]):
            for idx, node in enumerate(nodes):
                if node == 1:
                    # None -> 1
                    assert in_features[idx, n_idx, 0] == 0.0
                elif node == 2:
                    # 1 -> 2
                    assert in_features[idx, n_idx, 0] == -1.0
                elif node == 3:
                    # 2 -> 3
                    assert in_features[idx, n_idx, 0] == -2.0
                else:
                    # The graph only contains nodes 1, 2 and 3.
                    assert False
        # Check out-node features
        out_features = features[2]
        assert out_features.shape == (len(nodes), out_samples[0], 1)
        for n_idx in range(out_samples[0]):
            for idx, node in enumerate(nodes):
                if node == 1:
                    # 1 -> 2
                    assert out_features[idx, n_idx, 0] == -2.0
                elif node == 2:
                    # 2 -> 3
                    assert out_features[idx, n_idx, 0] == -3.0
                elif node == 3:
                    # 3 -> None
                    assert out_features[idx, n_idx, 0] == 0.0
                else:
                    # The graph only contains nodes 1, 2 and 3.
                    assert False
    def test_one_hop(self):
        """Exercise one-hop sampling for several in/out sample-count settings."""
        # Test 1 in-node and 1 out-node sampling
        self.sample_one_hop(1, 1)
        # Test 0 in-nodes and 1 out-node sampling
        self.sample_one_hop(0, 1)
        # Test 1 in-node and 0 out-nodes sampling
        self.sample_one_hop(1, 0)
        # Test 0 in-nodes and 0 out-nodes sampling
        self.sample_one_hop(0, 0)
        # Test 2 in-nodes and 3 out-nodes sampling
        self.sample_one_hop(2, 3)
    def test_two_hop(self):
        """
        Two-hop sampling with one sample per hop and direction; verify all
        seven slots of the sampled feature tree (self, in, out, in-in,
        in-out, out-in, out-out) on the chain graph 1 -> 2 -> 3.
        """
        g = create_simple_graph()
        nodes = list(g.nodes())
        gen = DirectedGraphSAGENodeGenerator(
            g, batch_size=len(g), in_samples=[1, 1], out_samples=[1, 1]
        )
        flow = gen.flow(node_ids=nodes, shuffle=False)
        features = gen.sample_features(nodes)
        num_hops = 2
        # Full binary tree over two hops: 2^3 - 1 = 7 feature arrays.
        tree_len = 2 ** (num_hops + 1) - 1
        assert len(features) == tree_len
        # Check node features
        node_features = features[0]
        assert len(node_features) == len(nodes)
        assert node_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            assert node_features[idx, 0, 0] == -1.0 * node
        # Check in-node features
        # (the '*' in the path comments below marks the sampled node)
        in_features = features[1]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *1 -> 2
                assert in_features[idx, 0, 0] == -1.0
            elif node == 3:
                # *2 -> 3
                assert in_features[idx, 0, 0] == -2.0
            else:
                assert False
        # Check out-node features
        out_features = features[2]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> *2
                assert out_features[idx, 0, 0] == -2.0
            elif node == 2:
                # 2 -> *3
                assert out_features[idx, 0, 0] == -3.0
            elif node == 3:
                # 3 -> *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False
        # Check in-in-node features
        in_features = features[3]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None -> None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *None -> 1 -> 2
                assert in_features[idx, 0, 0] == 0.0
            elif node == 3:
                # *1 -> 2 -> 3
                assert in_features[idx, 0, 0] == -1.0
            else:
                assert False
        # Check in-out-node features
        in_features = features[4]
        assert in_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # *None <- None -> 1
                assert in_features[idx, 0, 0] == 0.0
            elif node == 2:
                # *2 <- 1 -> 2
                assert in_features[idx, 0, 0] == -2.0
            elif node == 3:
                # *3 <- 2 -> 3
                assert in_features[idx, 0, 0] == -3.0
            else:
                assert False
        # Check out-in-node features
        out_features = features[5]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> 2 <- *1
                assert out_features[idx, 0, 0] == -1.0
            elif node == 2:
                # 2 -> 3 <- *2
                assert out_features[idx, 0, 0] == -2.0
            elif node == 3:
                # 3 -> None <- *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False
        # Check out-out-node features
        out_features = features[6]
        assert out_features.shape == (len(nodes), 1, 1)
        for idx, node in enumerate(nodes):
            if node == 1:
                # 1 -> 2 -> *3
                assert out_features[idx, 0, 0] == -3.0
            elif node == 2:
                # 2 -> 3 -> *None
                assert out_features[idx, 0, 0] == 0.0
            elif node == 3:
                # 3 -> None -> *None
                assert out_features[idx, 0, 0] == 0.0
            else:
                assert False
import random
import pytest
import networkx as nx
import pandas as pd
from stellargraph.mapper import DirectedGraphSAGENodeGenerator
from stellargraph.core.graph import StellarGraph, StellarDiGraph
def create_simple_graph():
    """
    Creates a simple directed graph for testing. The node ids are integers.

    The graph is the directed chain 1 -> 2 -> 3. Each node has one feature
    "f0" equal to minus its id (e.g. node 2 has feature -2.0), which lets the
    tests identify sampled nodes purely by their feature value.

    Returns:
        A small, directed graph with 3 nodes and 2 edges in StellarDiGraph format.
    """
    g = nx.DiGraph()
    edges = [(1, 2), (2, 3)]
    g.add_edges_from(edges)
    nodes = list(g.nodes())
    # One (id, feature) row per node: f0 = -node_id, unique per node.
    features = [(node, -1.0 * node) for node in nodes]
    df = pd.DataFrame(features, columns=["id", "f0"]).set_index("id")
    return StellarDiGraph(g, node_features=df)
class TestDirectedNodeGenerator(object):
"""
    Test various aspects of the directed GraphSAGE node generator, with the focus
    on the sampled neighbourhoods and the extracted features.
"""
def sample_one_hop(self, num_in_samples, num_out_samples):
g = create_simple_graph()
nodes = list(g.nodes())
in_samples = [num_in_samples]
out_samples = [num_out_samples]
gen = DirectedGraphSAGENodeGenerator(g, len(g), in_samples, out_samples)
flow = gen.flow(node_ids=nodes, shuffle=False)
# Obtain tree of sampled features
features = gen.sample_features(nodes)
num_hops = len(in_samples)
tree_len = 2 ** (num_hops + 1) - 1
assert len(features) == tree_len
# Check node features
node_features = features[0]
assert len(node_features) == len(nodes)
assert node_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
assert node_features[idx, 0, 0] == -1.0 * node
# Check in-node features
in_features = features[1]
assert in_features.shape == (len(nodes), in_samples[0], 1)
for n_idx in range(in_samples[0]):
for idx, node in enumerate(nodes):
if node == 1:
# None -> 1
assert in_features[idx, n_idx, 0] == 0.0
elif node == 2:
# 1 -> 2
assert in_features[idx, n_idx, 0] == -1.0
elif node == 3:
# 2 -> 3
assert in_features[idx, n_idx, 0] == -2.0
else:
assert False
# Check out-node features
out_features = features[2]
assert out_features.shape == (len(nodes), out_samples[0], 1)
for n_idx in range(out_samples[0]):
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2
assert out_features[idx, n_idx, 0] == -2.0
elif node == 2:
# 2 -> 3
assert out_features[idx, n_idx, 0] == -3.0
elif node == 3:
# 3 -> None
assert out_features[idx, n_idx, 0] == 0.0
else:
assert False
def test_one_hop(self):
# Test 1 in-node and 1 out-node sampling
self.sample_one_hop(1, 1)
# Test 0 in-nodes and 1 out-node sampling
self.sample_one_hop(0, 1)
# Test 1 in-node and 0 out-nodes sampling
self.sample_one_hop(1, 0)
# Test 0 in-nodes and 0 out-nodes sampling
self.sample_one_hop(0, 0)
# Test 2 in-nodes and 3 out-nodes sampling
self.sample_one_hop(2, 3)
def test_two_hop(self):
g = create_simple_graph()
nodes = list(g.nodes())
gen = DirectedGraphSAGENodeGenerator(
g, batch_size=len(g), in_samples=[1, 1], out_samples=[1, 1]
)
flow = gen.flow(node_ids=nodes, shuffle=False)
features = gen.sample_features(nodes)
num_hops = 2
tree_len = 2 ** (num_hops + 1) - 1
assert len(features) == tree_len
# Check node features
node_features = features[0]
assert len(node_features) == len(nodes)
assert node_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
assert node_features[idx, 0, 0] == -1.0 * node
# Check in-node features
in_features = features[1]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *1 -> 2
assert in_features[idx, 0, 0] == -1.0
elif node == 3:
# *2 -> 3
assert in_features[idx, 0, 0] == -2.0
else:
assert False
# Check out-node features
out_features = features[2]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> *2
assert out_features[idx, 0, 0] == -2.0
elif node == 2:
# 2 -> *3
assert out_features[idx, 0, 0] == -3.0
elif node == 3:
# 3 -> *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False
# Check in-in-node features
in_features = features[3]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None -> None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *None -> 1 -> 2
assert in_features[idx, 0, 0] == 0.0
elif node == 3:
# *1 -> 2 -> 3
assert in_features[idx, 0, 0] == -1.0
else:
assert False
# Check in-out-node features
in_features = features[4]
assert in_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# *None <- None -> 1
assert in_features[idx, 0, 0] == 0.0
elif node == 2:
# *2 <- 1 -> 2
assert in_features[idx, 0, 0] == -2.0
elif node == 3:
# *3 <- 2 -> 3
assert in_features[idx, 0, 0] == -3.0
else:
assert False
# Check out-in-node features
out_features = features[5]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2 <- *1
assert out_features[idx, 0, 0] == -1.0
elif node == 2:
# 2 -> 3 <- *2
assert out_features[idx, 0, 0] == -2.0
elif node == 3:
# 3 -> None <- *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False
# Check out-out-node features
out_features = features[6]
assert out_features.shape == (len(nodes), 1, 1)
for idx, node in enumerate(nodes):
if node == 1:
# 1 -> 2 -> *3
assert out_features[idx, 0, 0] == -3.0
elif node == 2:
# 2 -> 3 -> *None
assert out_features[idx, 0, 0] == 0.0
elif node == 3:
# 3 -> None -> *None
assert out_features[idx, 0, 0] == 0.0
else:
assert False | 0.765506 | 0.708944 |
import torch
from torch.utils.data import Dataset
import numpy as np
import dgl
from dgl.data.utils import download, extract_archive, get_download_dir
from .mol_tree_nx import DGLMolTree
from .mol_tree import Vocab
from .mpn import mol2dgl_single as mol2dgl_enc
from .jtmpn import mol2dgl_single as mol2dgl_dec
from .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC
from .jtmpn import BOND_FDIM as BOND_FDIM_DEC
# Remote archive containing the preprocessed JTNN dataset (SMILES lists + vocab).
_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'
def _unpack_field(examples, field):
return [e[field] for e in examples]
def _set_node_id(mol_tree, vocab):
wid = []
for i, node in enumerate(mol_tree.nodes_dict):
mol_tree.nodes_dict[node]['idx'] = i
wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))
return wid
class JTNNDataset(Dataset):
    """SMILES dataset for the Junction Tree VAE.

    Downloads and extracts the preprocessed JTNN archive on construction,
    then, per sample, builds the junction tree and molecular graph and — in
    training mode — the candidate-assembly and stereoisomer graphs needed
    by the decoder.
    """

    def __init__(self, data, vocab, training=True):
        # *data* and *vocab* are file stems inside the extracted archive,
        # e.g. data='train' reads '<download_dir>/jtnn/train.txt'.
        self.dir = get_download_dir()
        self.zip_file_path='{}/jtnn.zip'.format(self.dir)
        download(_url, path=self.zip_file_path)
        extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))
        print('Loading data...')
        data_file = '{}/jtnn/{}.txt'.format(self.dir, data)
        with open(data_file) as f:
            # Keep only the first whitespace-separated token (the SMILES).
            self.data = [line.strip("\r\n ").split()[0] for line in f]
        self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)
        print('Loading finished.')
        print('\tNum samples:', len(self.data))
        print('\tVocab file:', self.vocab_file)
        self.training = training
        # NOTE(review): the vocab file handle is left for the GC to close.
        self.vocab = Vocab([x.strip("\r\n ") for x in open(self.vocab_file)])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        smiles = self.data[idx]
        mol_tree = DGLMolTree(smiles)
        mol_tree.recover()
        mol_tree.assemble()
        # Per-node vocabulary indices; also sets node['idx'] in place.
        wid = _set_node_id(mol_tree, self.vocab)

        # prebuild the molecule graph
        mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)

        result = {
            'mol_tree': mol_tree,
            'mol_graph': mol_graph,
            'atom_x_enc': atom_x_enc,
            'bond_x_enc': bond_x_enc,
            'wid': wid,
        }

        # Evaluation mode needs only the tree and molecule graph.
        if not self.training:
            return result

        # prebuild the candidate graph list
        cands = []
        for node_id, node in mol_tree.nodes_dict.items():
            # fill in ground truth
            if node['label'] not in node['cands']:
                node['cands'].append(node['label'])
                node['cand_mols'].append(node['label_mol'])
            # Leaves and single-candidate nodes need no assembly scoring.
            if node['is_leaf'] or len(node['cands']) == 1:
                continue
            cands.extend([(cand, mol_tree, node_id)
                          for cand in node['cand_mols']])
        if len(cands) > 0:
            cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \
                tree_mess_tgt_e, tree_mess_tgt_n = mol2dgl_dec(cands)
        else:
            # Empty placeholders with the decoder feature widths so that
            # downstream torch.cat calls still work on this sample.
            cand_graphs = []
            atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)
            bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)
            tree_mess_src_e = torch.zeros(0, 2).long()
            tree_mess_tgt_e = torch.zeros(0, 2).long()
            tree_mess_tgt_n = torch.zeros(0).long()

        # prebuild the stereoisomers
        cands = mol_tree.stereo_cands
        if len(cands) > 1:
            # Make sure the ground-truth stereoisomer is among the candidates.
            if mol_tree.smiles3D not in cands:
                cands.append(mol_tree.smiles3D)
            stereo_graphs = [mol2dgl_enc(c) for c in cands]
            stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \
                zip(*stereo_graphs)
            stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)
            stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)
            # (index of the true stereoisomer, number of candidates)
            stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]
        else:
            stereo_cand_graphs = []
            stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])
            stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])
            stereo_cand_label = []

        result.update({
            'cand_graphs': cand_graphs,
            'atom_x_dec': atom_x_dec,
            'bond_x_dec': bond_x_dec,
            'tree_mess_src_e': tree_mess_src_e,
            'tree_mess_tgt_e': tree_mess_tgt_e,
            'tree_mess_tgt_n': tree_mess_tgt_n,
            'stereo_cand_graphs': stereo_cand_graphs,
            'stereo_atom_x_enc': stereo_atom_x_enc,
            'stereo_bond_x_enc': stereo_bond_x_enc,
            'stereo_cand_label': stereo_cand_label,
        })

        return result
class JTNNCollator(object):
    """Collate JTNNDataset examples into batched DGL graphs for a DataLoader."""

    def __init__(self, vocab, training):
        self.vocab = vocab
        self.training = training

    @staticmethod
    def _batch_and_set(graphs, atom_x, bond_x, flatten):
        # Batch a (possibly nested) list of graphs and attach node/edge
        # features; 'src_x' is a zero placeholder of the same width as the
        # node features.
        if flatten:
            graphs = [g for f in graphs for g in f]
        graph_batch = dgl.batch(graphs)
        graph_batch.ndata['x'] = atom_x
        graph_batch.edata.update({
            'x': bond_x,
            'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),
        })
        return graph_batch

    def __call__(self, examples):
        # get list of trees
        mol_trees = _unpack_field(examples, 'mol_tree')
        wid = _unpack_field(examples, 'wid')
        for _wid, mol_tree in zip(wid, mol_trees):
            mol_tree.ndata['wid'] = torch.LongTensor(_wid)

        # TODO: either support pickling or get around ctypes pointers using scipy
        # batch molecule graphs
        mol_graphs = _unpack_field(examples, 'mol_graph')
        atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))
        bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))
        mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)

        result = {
            'mol_trees': mol_trees,
            'mol_graph_batch': mol_graph_batch,
        }

        if not self.training:
            return result

        # batch candidate graphs
        cand_graphs = _unpack_field(examples, 'cand_graphs')
        cand_batch_idx = []
        atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))
        bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))
        tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')
        tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')
        tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')
        n_graph_nodes = 0
        n_tree_nodes = 0
        # Shift each example's node indices by the running totals so they
        # remain valid inside the batched graphs.
        for i in range(len(cand_graphs)):
            tree_mess_tgt_e[i] += n_graph_nodes
            tree_mess_src_e[i] += n_tree_nodes
            tree_mess_tgt_n[i] += n_graph_nodes
            n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])
            n_tree_nodes += mol_trees[i].number_of_nodes()
            cand_batch_idx.extend([i] * len(cand_graphs[i]))
        tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)
        tree_mess_src_e = torch.cat(tree_mess_src_e)
        tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)
        cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)

        # batch stereoisomers
        stereo_cand_graphs = _unpack_field(examples, 'stereo_cand_graphs')
        atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))
        bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))
        stereo_cand_batch_idx = []
        for i in range(len(stereo_cand_graphs)):
            stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))

        if len(stereo_cand_batch_idx) > 0:
            stereo_cand_labels = [
                (label, length)
                for ex in _unpack_field(examples, 'stereo_cand_label')
                for label, length in ex
            ]
            stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)
            stereo_cand_graph_batch = self._batch_and_set(
                stereo_cand_graphs, atom_x, bond_x, True)
        else:
            # No example in this batch has multiple stereoisomers.
            stereo_cand_labels = []
            stereo_cand_lengths = []
            stereo_cand_graph_batch = None
            stereo_cand_batch_idx = []

        result.update({
            'cand_graph_batch': cand_graph_batch,
            'cand_batch_idx': cand_batch_idx,
            'tree_mess_tgt_e': tree_mess_tgt_e,
            'tree_mess_src_e': tree_mess_src_e,
            'tree_mess_tgt_n': tree_mess_tgt_n,
            'stereo_cand_graph_batch': stereo_cand_graph_batch,
            'stereo_cand_batch_idx': stereo_cand_batch_idx,
            'stereo_cand_labels': stereo_cand_labels,
            'stereo_cand_lengths': stereo_cand_lengths,
        })
        return result
from torch.utils.data import Dataset
import numpy as np
import dgl
from dgl.data.utils import download, extract_archive, get_download_dir
from .mol_tree_nx import DGLMolTree
from .mol_tree import Vocab
from .mpn import mol2dgl_single as mol2dgl_enc
from .jtmpn import mol2dgl_single as mol2dgl_dec
from .jtmpn import ATOM_FDIM as ATOM_FDIM_DEC
from .jtmpn import BOND_FDIM as BOND_FDIM_DEC
_url = 'https://s3-ap-southeast-1.amazonaws.com/dgl-data-cn/dataset/jtnn.zip'
def _unpack_field(examples, field):
return [e[field] for e in examples]
def _set_node_id(mol_tree, vocab):
wid = []
for i, node in enumerate(mol_tree.nodes_dict):
mol_tree.nodes_dict[node]['idx'] = i
wid.append(vocab.get_index(mol_tree.nodes_dict[node]['smiles']))
return wid
class JTNNDataset(Dataset):
def __init__(self, data, vocab, training=True):
self.dir = get_download_dir()
self.zip_file_path='{}/jtnn.zip'.format(self.dir)
download(_url, path=self.zip_file_path)
extract_archive(self.zip_file_path, '{}/jtnn'.format(self.dir))
print('Loading data...')
data_file = '{}/jtnn/{}.txt'.format(self.dir, data)
with open(data_file) as f:
self.data = [line.strip("\r\n ").split()[0] for line in f]
self.vocab_file = '{}/jtnn/{}.txt'.format(self.dir, vocab)
print('Loading finished.')
print('\tNum samples:', len(self.data))
print('\tVocab file:', self.vocab_file)
self.training = training
self.vocab = Vocab([x.strip("\r\n ") for x in open(self.vocab_file)])
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
smiles = self.data[idx]
mol_tree = DGLMolTree(smiles)
mol_tree.recover()
mol_tree.assemble()
wid = _set_node_id(mol_tree, self.vocab)
# prebuild the molecule graph
mol_graph, atom_x_enc, bond_x_enc = mol2dgl_enc(mol_tree.smiles)
result = {
'mol_tree': mol_tree,
'mol_graph': mol_graph,
'atom_x_enc': atom_x_enc,
'bond_x_enc': bond_x_enc,
'wid': wid,
}
if not self.training:
return result
# prebuild the candidate graph list
cands = []
for node_id, node in mol_tree.nodes_dict.items():
# fill in ground truth
if node['label'] not in node['cands']:
node['cands'].append(node['label'])
node['cand_mols'].append(node['label_mol'])
if node['is_leaf'] or len(node['cands']) == 1:
continue
cands.extend([(cand, mol_tree, node_id)
for cand in node['cand_mols']])
if len(cands) > 0:
cand_graphs, atom_x_dec, bond_x_dec, tree_mess_src_e, \
tree_mess_tgt_e, tree_mess_tgt_n = mol2dgl_dec(cands)
else:
cand_graphs = []
atom_x_dec = torch.zeros(0, ATOM_FDIM_DEC)
bond_x_dec = torch.zeros(0, BOND_FDIM_DEC)
tree_mess_src_e = torch.zeros(0, 2).long()
tree_mess_tgt_e = torch.zeros(0, 2).long()
tree_mess_tgt_n = torch.zeros(0).long()
# prebuild the stereoisomers
cands = mol_tree.stereo_cands
if len(cands) > 1:
if mol_tree.smiles3D not in cands:
cands.append(mol_tree.smiles3D)
stereo_graphs = [mol2dgl_enc(c) for c in cands]
stereo_cand_graphs, stereo_atom_x_enc, stereo_bond_x_enc = \
zip(*stereo_graphs)
stereo_atom_x_enc = torch.cat(stereo_atom_x_enc)
stereo_bond_x_enc = torch.cat(stereo_bond_x_enc)
stereo_cand_label = [(cands.index(mol_tree.smiles3D), len(cands))]
else:
stereo_cand_graphs = []
stereo_atom_x_enc = torch.zeros(0, atom_x_enc.shape[1])
stereo_bond_x_enc = torch.zeros(0, bond_x_enc.shape[1])
stereo_cand_label = []
result.update({
'cand_graphs': cand_graphs,
'atom_x_dec': atom_x_dec,
'bond_x_dec': bond_x_dec,
'tree_mess_src_e': tree_mess_src_e,
'tree_mess_tgt_e': tree_mess_tgt_e,
'tree_mess_tgt_n': tree_mess_tgt_n,
'stereo_cand_graphs': stereo_cand_graphs,
'stereo_atom_x_enc': stereo_atom_x_enc,
'stereo_bond_x_enc': stereo_bond_x_enc,
'stereo_cand_label': stereo_cand_label,
})
return result
class JTNNCollator(object):
def __init__(self, vocab, training):
self.vocab = vocab
self.training = training
@staticmethod
def _batch_and_set(graphs, atom_x, bond_x, flatten):
if flatten:
graphs = [g for f in graphs for g in f]
graph_batch = dgl.batch(graphs)
graph_batch.ndata['x'] = atom_x
graph_batch.edata.update({
'x': bond_x,
'src_x': atom_x.new(bond_x.shape[0], atom_x.shape[1]).zero_(),
})
return graph_batch
def __call__(self, examples):
# get list of trees
mol_trees = _unpack_field(examples, 'mol_tree')
wid = _unpack_field(examples, 'wid')
for _wid, mol_tree in zip(wid, mol_trees):
mol_tree.ndata['wid'] = torch.LongTensor(_wid)
# TODO: either support pickling or get around ctypes pointers using scipy
# batch molecule graphs
mol_graphs = _unpack_field(examples, 'mol_graph')
atom_x = torch.cat(_unpack_field(examples, 'atom_x_enc'))
bond_x = torch.cat(_unpack_field(examples, 'bond_x_enc'))
mol_graph_batch = self._batch_and_set(mol_graphs, atom_x, bond_x, False)
result = {
'mol_trees': mol_trees,
'mol_graph_batch': mol_graph_batch,
}
if not self.training:
return result
# batch candidate graphs
cand_graphs = _unpack_field(examples, 'cand_graphs')
cand_batch_idx = []
atom_x = torch.cat(_unpack_field(examples, 'atom_x_dec'))
bond_x = torch.cat(_unpack_field(examples, 'bond_x_dec'))
tree_mess_src_e = _unpack_field(examples, 'tree_mess_src_e')
tree_mess_tgt_e = _unpack_field(examples, 'tree_mess_tgt_e')
tree_mess_tgt_n = _unpack_field(examples, 'tree_mess_tgt_n')
n_graph_nodes = 0
n_tree_nodes = 0
for i in range(len(cand_graphs)):
tree_mess_tgt_e[i] += n_graph_nodes
tree_mess_src_e[i] += n_tree_nodes
tree_mess_tgt_n[i] += n_graph_nodes
n_graph_nodes += sum(g.number_of_nodes() for g in cand_graphs[i])
n_tree_nodes += mol_trees[i].number_of_nodes()
cand_batch_idx.extend([i] * len(cand_graphs[i]))
tree_mess_tgt_e = torch.cat(tree_mess_tgt_e)
tree_mess_src_e = torch.cat(tree_mess_src_e)
tree_mess_tgt_n = torch.cat(tree_mess_tgt_n)
cand_graph_batch = self._batch_and_set(cand_graphs, atom_x, bond_x, True)
# batch stereoisomers
stereo_cand_graphs = _unpack_field(examples, 'stereo_cand_graphs')
atom_x = torch.cat(_unpack_field(examples, 'stereo_atom_x_enc'))
bond_x = torch.cat(_unpack_field(examples, 'stereo_bond_x_enc'))
stereo_cand_batch_idx = []
for i in range(len(stereo_cand_graphs)):
stereo_cand_batch_idx.extend([i] * len(stereo_cand_graphs[i]))
if len(stereo_cand_batch_idx) > 0:
stereo_cand_labels = [
(label, length)
for ex in _unpack_field(examples, 'stereo_cand_label')
for label, length in ex
]
stereo_cand_labels, stereo_cand_lengths = zip(*stereo_cand_labels)
stereo_cand_graph_batch = self._batch_and_set(
stereo_cand_graphs, atom_x, bond_x, True)
else:
stereo_cand_labels = []
stereo_cand_lengths = []
stereo_cand_graph_batch = None
stereo_cand_batch_idx = []
result.update({
'cand_graph_batch': cand_graph_batch,
'cand_batch_idx': cand_batch_idx,
'tree_mess_tgt_e': tree_mess_tgt_e,
'tree_mess_src_e': tree_mess_src_e,
'tree_mess_tgt_n': tree_mess_tgt_n,
'stereo_cand_graph_batch': stereo_cand_graph_batch,
'stereo_cand_batch_idx': stereo_cand_batch_idx,
'stereo_cand_labels': stereo_cand_labels,
'stereo_cand_lengths': stereo_cand_lengths,
})
return result | 0.494873 | 0.350727 |
from django import forms
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views import generic
from plata.discount.models import Discount
from plata.shop import forms as shop_forms
from plata.shop.views import Shop
from plata.shop.models import Order
from custom.models import Contact, Product
class CheckoutForm(shop_forms.BaseCheckoutForm):
    """Checkout form collecting the email plus all billing address fields.

    Pre-fills from the authenticated user's contact record when one exists;
    otherwise offers an opt-in "create account" checkbox.
    """

    class Meta:
        fields = ["email"] + ["billing_%s" % f for f in Contact.ADDRESS_FIELDS]
        model = Order

    def __init__(self, *args, **kwargs):
        # NOTE(review): 'shop' and 'request' are read (not popped) here —
        # presumably BaseCheckoutForm.__init__ consumes them; confirm.
        shop = kwargs.get("shop")
        request = kwargs.get("request")

        contact = shop.contact_from_user(request.user)

        if contact:
            initial = {}
            for f in contact.ADDRESS_FIELDS:
                initial["billing_%s" % f] = getattr(contact, f)
            kwargs["initial"] = initial
            # Same dict object as kwargs["initial"], so email is included too.
            initial["email"] = contact.user.email

        super(CheckoutForm, self).__init__(*args, **kwargs)

        if not contact:
            self.fields["create_account"] = forms.BooleanField(
                label=_("create account"), required=False, initial=True
            )
class CustomShop(Shop):
    """Shop subclass that swaps in this project's CheckoutForm."""

    def checkout_form(self, request, order):
        # Returns the form *class*; the caller is responsible for instantiating it.
        return CheckoutForm
# Single shop instance wired to this project's contact/order/discount models.
shop = CustomShop(Contact, Order, Discount)

# Read-only listing of all active products.
product_list = generic.ListView.as_view(
    queryset=Product.objects.filter(is_active=True),
    template_name="product/product_list.html",
)
class OrderItemForm(forms.Form):
    """Quantity selector (1-100) used when adding a product to the cart."""

    quantity = forms.IntegerField(
        label=_("quantity"), initial=1, min_value=1, max_value=100
    )
def product_detail(request, object_id):
    """Product detail page with an add-to-cart form.

    GET renders the product alongside an empty quantity form. A valid POST
    adds/updates the order item and redirects to the cart; a sealed order
    (already past checkout) surfaces its validation messages to the user
    instead of crashing.
    """
    product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)

    if request.method == "POST":
        form = OrderItemForm(request.POST)

        if form.is_valid():
            # Create the order lazily on the first cart interaction.
            order = shop.order_from_request(request, create=True)

            try:
                order.modify_item(product, form.cleaned_data.get("quantity"))
                messages.success(request, _("The cart has been updated."))
            except ValidationError as e:
                if e.code == "order_sealed":
                    # Plain loop instead of a side-effect-only list
                    # comprehension: show every sealed-order message.
                    for msg in e.messages:
                        messages.error(request, msg)
                else:
                    raise

            return redirect("plata_shop_cart")
    else:
        form = OrderItemForm()

    return render(
        request, "product/product_detail.html", {"object": product, "form": form}
    )
from django.contrib import messages
from django.core.exceptions import ValidationError
from django.db.models import ObjectDoesNotExist
from django.shortcuts import get_object_or_404, redirect, render
from django.utils.translation import ugettext as _
from django.views import generic
from plata.discount.models import Discount
from plata.shop import forms as shop_forms
from plata.shop.views import Shop
from plata.shop.models import Order
from custom.models import Contact, Product
class CheckoutForm(shop_forms.BaseCheckoutForm):
class Meta:
fields = ["email"] + ["billing_%s" % f for f in Contact.ADDRESS_FIELDS]
model = Order
def __init__(self, *args, **kwargs):
shop = kwargs.get("shop")
request = kwargs.get("request")
contact = shop.contact_from_user(request.user)
if contact:
initial = {}
for f in contact.ADDRESS_FIELDS:
initial["billing_%s" % f] = getattr(contact, f)
kwargs["initial"] = initial
initial["email"] = contact.user.email
super(CheckoutForm, self).__init__(*args, **kwargs)
if not contact:
self.fields["create_account"] = forms.BooleanField(
label=_("create account"), required=False, initial=True
)
class CustomShop(Shop):
def checkout_form(self, request, order):
return CheckoutForm
shop = CustomShop(Contact, Order, Discount)
product_list = generic.ListView.as_view(
queryset=Product.objects.filter(is_active=True),
template_name="product/product_list.html",
)
class OrderItemForm(forms.Form):
quantity = forms.IntegerField(
label=_("quantity"), initial=1, min_value=1, max_value=100
)
def product_detail(request, object_id):
product = get_object_or_404(Product.objects.filter(is_active=True), pk=object_id)
if request.method == "POST":
form = OrderItemForm(request.POST)
if form.is_valid():
order = shop.order_from_request(request, create=True)
try:
order.modify_item(product, form.cleaned_data.get("quantity"))
messages.success(request, _("The cart has been updated."))
except ValidationError as e:
if e.code == "order_sealed":
[messages.error(request, msg) for msg in e.messages]
else:
raise
return redirect("plata_shop_cart")
else:
form = OrderItemForm()
return render(
request, "product/product_detail.html", {"object": product, "form": form}
) | 0.516108 | 0.10923 |
import os
import re
import inspect
import importlib
from lxml import etree
import click
import jinja2
from prompt_toolkit import (
prompt
)
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.shortcuts import print_tokens
from botocore import xform_name
from botocore.session import Session
import boto3
from moto.core.responses import BaseResponse
from moto.core import BaseBackend
from implementation_coverage import (
get_moto_implementation
)
from inflection import singularize
# Directory holding the jinja2 scaffolding templates, next to this script.
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), './template')

# Pagination members that the generated backend signatures should not expose.
INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize']
OUTPUT_IGNORED_IN_BACKEND = ['NextMarker']
def print_progress(title, body, color):
    """Echo a colored, tab-framed *title* followed by *body* on one line."""
    label = u'\t{}\t'.format(title)
    click.secho(label, fg=color, nl=False)
    click.echo(body)
def select_service_and_operation():
    """Interactively pick an AWS service and a not-yet-implemented operation.

    Prompts on stdin (with word completion), prints moto's current
    implementation status for the chosen service, and raises click.Abort
    for an unknown service/operation or an already-implemented operation.

    Returns:
        (service_name, operation_name) where the operation is snake_case.
    """
    service_names = Session().get_available_services()
    service_completer = WordCompleter(service_names)
    service_name = prompt(u'Select service: ', completer=service_completer)
    if service_name not in service_names:
        click.secho(u'{} is not valid service'.format(service_name), fg='red')
        raise click.Abort()
    moto_client = get_moto_implementation(service_name)
    real_client = boto3.client(service_name, region_name='us-east-1')
    implemented = []
    not_implemented = []

    # An operation counts as implemented when the moto backend exposes a
    # same-named attribute.
    operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names]
    for op in operation_names:
        if moto_client and op in dir(moto_client):
            implemented.append(op)
        else:
            not_implemented.append(op)

    operation_completer = WordCompleter(operation_names)

    click.echo('==Current Implementation Status==')
    for operation_name in operation_names:
        check = 'X' if operation_name in implemented else ' '
        click.secho('[{}] {}'.format(check, operation_name))
    click.echo('=================================')
    operation_name = prompt(u'Select Operation: ', completer=operation_completer)

    if operation_name not in operation_names:
        click.secho('{} is not valid operation'.format(operation_name), fg='red')
        raise click.Abort()

    if operation_name in implemented:
        click.secho('{} is already implemented'.format(operation_name), fg='red')
        raise click.Abort()
    return service_name, operation_name
def get_escaped_service(service):
    """Strip hyphens from *service* so it is usable as a Python identifier."""
    return ''.join(service.split('-'))
def get_lib_dir(service):
    """Relative path of the moto implementation package for *service*."""
    escaped = get_escaped_service(service)
    return os.path.join('moto', escaped)
def get_test_dir(service):
    """Relative path of the test package for *service*."""
    escaped = get_escaped_service(service)
    return os.path.join('tests', 'test_{}'.format(escaped))
def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None):
    """Render one jinja2 template into the service's lib or test directory.

    The destination is the test directory when 'test' appears in *tmpl_dir*,
    otherwise the lib directory. Existing files are never overwritten.
    *alt_filename* overrides the default target name (the template filename
    minus its last extension, i.e. the '.j2' suffix).
    """
    is_test = True if 'test' in tmpl_dir else False
    rendered = jinja2.Environment(
        loader=jinja2.FileSystemLoader(tmpl_dir)
    ).get_template(tmpl_filename).render(context)

    dirname = get_test_dir(service) if is_test else get_lib_dir(service)
    filename = alt_filename or os.path.splitext(tmpl_filename)[0]
    filepath = os.path.join(dirname, filename)

    if os.path.exists(filepath):
        print_progress('skip creating', filepath, 'yellow')
    else:
        print_progress('creating', filepath, 'green')
        with open(filepath, 'w') as f:
            f.write(rendered)
def append_mock_to_init_py(service):
    """Add 'from .<service> import mock_<service>' to moto/__init__.py.

    The new import is inserted right after the last existing mock import.
    No-op when a matching import is already present.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', '__init__.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    # NOTE(review): the existence check formats the raw service name into the
    # regex while the inserted line uses the escaped name — hyphenated
    # services may never match and could be re-appended; confirm intended.
    if any(_ for _ in lines if re.match('^from.*mock_{}.*$'.format(service), _)):
        return
    filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)]
    last_import_line_index = lines.index(filtered_lines[-1])

    new_line = 'from .{} import mock_{} # noqa'.format(get_escaped_service(service), get_escaped_service(service))
    lines.insert(last_import_line_index + 1, new_line)

    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def append_mock_import_to_backends_py(service):
    """Add 'from moto.<service> import <service>_backends' to moto/backends.py.

    The new import is inserted right after the last existing backends import.
    No-op when a matching import is already present.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    if any(_ for _ in lines if re.match('^from moto\.{}.*{}_backends.*$'.format(service, service), _)):
        return
    filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)]
    last_import_line_index = lines.index(filtered_lines[-1])

    new_line = 'from moto.{} import {}_backends'.format(get_escaped_service(service), get_escaped_service(service))
    lines.insert(last_import_line_index + 1, new_line)

    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def append_mock_dict_to_backends_py(service):
    """Register "'<service>': <service>_backends" in moto/backends.py's BACKENDS dict.

    Appends after the last existing entry, adding a trailing comma to the
    previous line when it lacks one. No-op when the entry already exists.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, service), _)):
        return
    filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)]
    last_elem_line_index = lines.index(filtered_lines[-1])

    new_line = " '{}': {}_backends,".format(service, get_escaped_service(service))
    prev_line = lines[last_elem_line_index]
    # Keep the dict literal syntactically valid after insertion.
    if not prev_line.endswith('{') and not prev_line.endswith(','):
        lines[last_elem_line_index] += ','
    lines.insert(last_elem_line_index + 1, new_line)

    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def initialize_service(service, operation, api_protocol):
    """create lib and test dirs if not exist

    Renders every template under template/lib and template/test into the
    new service's directories (never overwriting existing files) and
    registers the mock in moto's package files.
    """
    lib_dir = get_lib_dir(service)
    test_dir = get_test_dir(service)

    print_progress('Initializing service', service, 'green')

    client = boto3.client(service)
    service_class = client.__class__.__name__
    endpoint_prefix = client._service_model.endpoint_prefix

    # Context shared by all lib/test templates.
    tmpl_context = {
        'service': service,
        'service_class': service_class,
        'endpoint_prefix': endpoint_prefix,
        'api_protocol': api_protocol,
        'escaped_service': get_escaped_service(service)
    }

    # initialize service directory
    if os.path.exists(lib_dir):
        print_progress('skip creating', lib_dir, 'yellow')
    else:
        print_progress('creating', lib_dir, 'green')
        os.makedirs(lib_dir)

    tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib')
    for tmpl_filename in os.listdir(tmpl_dir):
        render_template(
            tmpl_dir, tmpl_filename, tmpl_context, service
        )

    # initialize test directory
    if os.path.exists(test_dir):
        print_progress('skip creating', test_dir, 'yellow')
    else:
        print_progress('creating', test_dir, 'green')
        os.makedirs(test_dir)
    tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
    for tmpl_filename in os.listdir(tmpl_dir):
        # The generic test template is renamed per-service.
        alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
        render_template(
            tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
        )

    # append mock to init files
    append_mock_to_init_py(service)
    append_mock_import_to_backends_py(service)
    append_mock_dict_to_backends_py(service)
def to_upper_camel_case(s):
    """Convert a snake_case string to UpperCamelCase."""
    return ''.join(part.title() for part in s.split('_'))
def to_lower_camel_case(s):
    """Convert a snake_case string to lowerCamelCase."""
    first, *rest = s.split('_')
    return first + ''.join(part.title() for part in rest)
def to_snake_case(s):
    """Convert a CamelCase string to snake_case."""
    # First pass separates words that start with Upper+lower runs;
    # second pass splits remaining lower/digit-to-Upper boundaries.
    with_boundaries = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', with_boundaries).lower()
def get_function_in_responses(service, operation, protocol):
    """refers to definition of API in botocore, and autogenerates function
    You can see example of elbv2 from link below.
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json

    Builds the responses.py handler source for *operation*: it extracts each
    input member from the request, forwards them to the backend, and renders
    the reply as an XML template ('query' protocol) or JSON ('json' /
    'rest-json' protocols).
    """
    client = boto3.client(service)

    aws_operation_name = to_upper_camel_case(operation)

    op_model = client._service_model.operation_model(aws_operation_name)
    # Operations without a response body have no output shape members.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    inputs = op_model.input_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    body = '\ndef {}(self):\n'.format(operation)

    # One parameter-extraction line per input member, picked by shape type.
    for input_name, input_type in inputs.items():
        type_name = input_type.type_name
        if type_name == 'integer':
            arg_line_tmpl = ' {} = self._get_int_param("{}")\n'
        elif type_name == 'list':
            arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n'
        else:
            arg_line_tmpl = ' {} = self._get_param("{}")\n'
        body += arg_line_tmpl.format(to_snake_case(input_name), input_name)
    if output_names:
        body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation)
    else:
        body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation)
    for input_name in input_names:
        body += ' {}={},\n'.format(input_name, input_name)
    body += ' )\n'
    if protocol == 'query':
        body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper())
        body += ' return template.render({})\n'.format(
            ', '.join(['{}={}'.format(_, _) for _ in output_names])
        )
    elif protocol in ['json', 'rest-json']:
        body += ' # TODO: adjust response\n'
        body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names]))
    return body
def get_function_in_models(service, operation):
    """refers to definition of API in botocore, and autogenerates function
    You can see example of elbv2 from link below.
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json

    Builds the models.py backend-method skeleton for *operation*: snake_case
    arguments come from the operation's input shape; the return statement
    lists the output shape members.
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    inputs = op_model.input_shape.members
    # Operations without a response body have no output shape members.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    if input_names:
        body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
    else:
        # BUG FIX: this branch previously emitted the literal 'def {}(self)\n'
        # (unformatted placeholder, no colon), i.e. invalid Python source.
        body = 'def {}(self):\n'.format(operation)
    body += ' # implement here\n'
    body += ' return {}\n\n'.format(', '.join(output_names))

    return body
def _get_subtree(name, shape, replace_list, name_prefix=None):
    """Recursively build an lxml subtree mirroring a botocore output shape.

    Scalar/string shapes become elements whose text is a jinja2 placeholder
    ('{{ var }}' or '{{ parent.var }}'); list shapes additionally record
    (name, prefix) in *replace_list* so the caller can wrap the rendered
    XML in jinja2 for-loops.

    Args:
        name: member name, used as the XML tag.
        shape: botocore shape object for the member.
        replace_list: output parameter collecting list-shaped members.
        name_prefix: chain of singularized ancestor list names; defaults to [].

    Raises:
        ValueError: for shape classes other than Shape/StringShape/ListShape.
    """
    # Use a None sentinel instead of a mutable default argument; behavior
    # is unchanged (the list was never mutated, only concatenated).
    if name_prefix is None:
        name_prefix = []
    class_name = shape.__class__.__name__
    if class_name in ('StringShape', 'Shape'):
        t = etree.Element(name)
        if name_prefix:
            t.text = '{{ %s.%s }}' % (name_prefix[-1], to_snake_case(name))
        else:
            t.text = '{{ %s }}' % to_snake_case(name)
        return t
    elif class_name in ('ListShape', ):
        replace_list.append((name, name_prefix))
        t = etree.Element(name)
        t_member = etree.Element('member')
        t.append(t_member)
        for nested_name, nested_shape in shape.member.members.items():
            t_member.append(_get_subtree(nested_name, nested_shape, replace_list,
                                         name_prefix + [singularize(name.lower())]))
        return t
    raise ValueError('Not supported Shape')
def get_response_query_template(service, operation):
    """Autogenerate the XML response template for a query-protocol operation.

    Reads the operation's output shape from botocore and renders a Jinja
    template string; list members are wrapped in for-loops.
    Example service definition (elbv2):
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    result_wrapper = op_model.output_shape.serialization['resultWrapper']
    response_wrapper = result_wrapper.replace('Result', 'Response')
    metadata = op_model.metadata
    xml_namespace = metadata['xmlNamespace']
    # build xml tree
    t_root = etree.Element(response_wrapper, xmlns=xml_namespace)
    # build metadata (static sample request id, as in other moto templates)
    t_metadata = etree.Element('ResponseMetadata')
    t_request_id = etree.Element('RequestId')
    t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE'
    t_metadata.append(t_request_id)
    t_root.append(t_metadata)
    # build result
    t_result = etree.Element(result_wrapper)
    outputs = op_model.output_shape.members
    replace_list = []
    for output_name, output_shape in outputs.items():
        t_result.append(_get_subtree(output_name, output_shape, replace_list))
    t_root.append(t_result)
    xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8')
    xml_body_lines = xml_body.splitlines()
    # Wrap every recorded list element in a Jinja for-loop.
    for replace in replace_list:
        name = replace[0]
        prefix = replace[1]
        singular_name = singularize(name)
        start_tag = '<%s>' % name
        iter_name = '{}.{}'.format(prefix[-1], name.lower()) if prefix else name.lower()
        loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name)
        end_tag = '</%s>' % name
        # Fix: '{{ endfor }}' is a Jinja *expression* (it renders the
        # undefined name "endfor"); the loop must be closed with the
        # statement tag '{% endfor %}'.
        loop_end = '{% endfor %}'
        start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l]
        if len(start_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % start_tag)
        start_tag_index = start_tag_indexes[0]
        xml_body_lines.insert(start_tag_index + 1, loop_start)
        end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l]
        if len(end_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % end_tag)
        end_tag_index = end_tag_indexes[0]
        xml_body_lines.insert(end_tag_index, loop_end)
    xml_body = '\n'.join(xml_body_lines)
    body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body)
    return body
def insert_code_to_class(path, base_class, new_code):
    """Append *new_code* as a method of the single subclass of *base_class*
    defined in the module at *path*, rewriting the file in place.

    Raises Exception if the module does not contain exactly one subclass.
    """
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    # Import the target module so inspect can locate the class's source span.
    mod_path = os.path.splitext(path)[0].replace('/', '.')
    mod = importlib.import_module(mod_path)
    clsmembers = inspect.getmembers(mod, inspect.isclass)
    _response_cls = [_[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class]
    if len(_response_cls) != 1:
        raise Exception('unknown error, number of clsmembers is not 1')
    response_cls = _response_cls[0]
    # getsourcelines returns (source lines, 1-based start line number).
    code_lines, line_no = inspect.getsourcelines(response_cls)
    # NOTE(review): since line_no is 1-based and `lines` is 0-based, this
    # index lands one line *after* the class's last source line -- presumably
    # intentional so insertion follows a trailing line; confirm before changing.
    end_line_no = line_no + len(code_lines)
    # Indent the generated code one level so it becomes a method body.
    func_lines = [' ' * 4 + _ for _ in new_code.splitlines()]
    lines = lines[:end_line_no] + func_lines + lines[end_line_no:]
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def insert_url(service, operation, api_protocol):
    """Register the operation's URL pattern in moto/<service>/urls.py.

    The literal '{0}' in the inserted pattern is intentionally left
    unformatted: moto's urls.py formats it with the service's URL base.
    """
    client = boto3.client(service)
    service_class = client.__class__.__name__
    aws_operation_name = to_upper_camel_case(operation)
    uri = client._service_model.operation_model(aws_operation_name).http['requestUri']
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    # Already registered?  NOTE(review): *uri* is used directly as a regex, so
    # URIs containing metacharacters (e.g. '{Name}') may misbehave -- confirm.
    if any(_ for _ in lines if re.match(uri, _)):
        return
    # Locate the last entry of the url_paths dict literal.
    url_paths_found = False
    last_elem_line_index = -1
    for i, line in enumerate(lines):
        if line.startswith('url_paths'):
            url_paths_found = True
        if url_paths_found and line.startswith('}'):
            last_elem_line_index = i - 1
    # Ensure the previous entry ends with a comma before appending ours.
    prev_line = lines[last_elem_line_index]
    if not prev_line.endswith('{') and not prev_line.endswith(','):
        lines[last_elem_line_index] += ','
    # generate url pattern
    if api_protocol == 'rest-json':
        new_line = " '{0}/.*$': response.dispatch,"
    else:
        new_line = " '{0}%s$': %sResponse.dispatch," % (
            uri, service_class
        )
    if new_line in lines:
        return
    lines.insert(last_elem_line_index + 1, new_line)
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def insert_codes(service, operation, api_protocol):
    """Wire a new operation into an existing moto service.

    Generates and inserts the dispatcher method (responses.py), the backend
    stub (models.py), the XML template for query-protocol services, and the
    URL route (urls.py).
    """
    func_in_responses = get_function_in_responses(service, operation, api_protocol)
    func_in_models = get_function_in_models(service, operation)
    # edit responses.py
    responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service))
    print_progress('inserting code', responses_path, 'green')
    insert_code_to_class(responses_path, BaseResponse, func_in_responses)
    # insert template (query-protocol responses render from an XML template)
    if api_protocol == 'query':
        template = get_response_query_template(service, operation)
        with open(responses_path) as f:
            lines = [_[:-1] for _ in f.readlines()]
        lines += template.splitlines()
        with open(responses_path, 'w') as f:
            f.write('\n'.join(lines))
    # edit models.py
    models_path = 'moto/{}/models.py'.format(get_escaped_service(service))
    print_progress('inserting code', models_path, 'green')
    insert_code_to_class(models_path, BaseBackend, func_in_models)
    # edit urls.py
    insert_url(service, operation, api_protocol)
@click.command()
def main():
    """Interactive scaffolder: pick a service/operation, generate the moto
    skeleton, and insert dispatcher/backend code for supported protocols."""
    service, operation = select_service_and_operation()
    api_protocol = boto3.client(service)._service_model.metadata['protocol']
    initialize_service(service, operation, api_protocol)
    if api_protocol in ['query', 'json', 'rest-json']:
        insert_codes(service, operation, api_protocol)
    else:
        print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow')
    click.echo('You will still need to add the mock into "__init__.py"'.format(service))
if __name__ == '__main__':
main() | scripts/scaffold.py | import os
import re
import inspect
import importlib
from lxml import etree
import click
import jinja2
from prompt_toolkit import (
prompt
)
from prompt_toolkit.contrib.completers import WordCompleter
from prompt_toolkit.shortcuts import print_tokens
from botocore import xform_name
from botocore.session import Session
import boto3
from moto.core.responses import BaseResponse
from moto.core import BaseBackend
from implementation_coverage import (
get_moto_implementation
)
from inflection import singularize
TEMPLATE_DIR = os.path.join(os.path.dirname(__file__), './template')
INPUT_IGNORED_IN_BACKEND = ['Marker', 'PageSize']
OUTPUT_IGNORED_IN_BACKEND = ['NextMarker']
def print_progress(title, body, color):
    """Emit one progress line: a colored, tab-padded title, then the body."""
    prefix = u'\t{}\t'.format(title)
    click.secho(prefix, nl=False, fg=color)
    click.echo(body)
def select_service_and_operation():
    """Interactively prompt for a boto3 service and a not-yet-implemented
    operation (tab completion via prompt_toolkit).

    Returns:
        (service_name, operation_name) tuple.

    Raises:
        click.Abort: unknown service/operation, or one moto already implements.
    """
    service_names = Session().get_available_services()
    service_completer = WordCompleter(service_names)
    service_name = prompt(u'Select service: ', completer=service_completer)
    if service_name not in service_names:
        click.secho(u'{} is not valid service'.format(service_name), fg='red')
        raise click.Abort()
    moto_client = get_moto_implementation(service_name)
    real_client = boto3.client(service_name, region_name='us-east-1')
    implemented = []
    not_implemented = []
    # Compare the real API's operation list against what moto exposes.
    operation_names = [xform_name(op) for op in real_client.meta.service_model.operation_names]
    for op in operation_names:
        if moto_client and op in dir(moto_client):
            implemented.append(op)
        else:
            not_implemented.append(op)
    operation_completer = WordCompleter(operation_names)
    click.echo('==Current Implementation Status==')
    for operation_name in operation_names:
        check = 'X' if operation_name in implemented else ' '
        click.secho('[{}] {}'.format(check, operation_name))
    click.echo('=================================')
    operation_name = prompt(u'Select Operation: ', completer=operation_completer)
    if operation_name not in operation_names:
        click.secho('{} is not valid operation'.format(operation_name), fg='red')
        raise click.Abort()
    if operation_name in implemented:
        click.secho('{} is already implemented'.format(operation_name), fg='red')
        raise click.Abort()
    return service_name, operation_name
def get_escaped_service(service):
    """Return the service name with hyphens removed (identifier-safe form)."""
    return service.replace('-', '')


def get_lib_dir(service):
    """Path of the moto implementation package for *service*."""
    escaped = get_escaped_service(service)
    return os.path.join('moto', escaped)


def get_test_dir(service):
    """Path of the test package for *service*."""
    escaped = get_escaped_service(service)
    return os.path.join('tests', 'test_{}'.format(escaped))
def render_template(tmpl_dir, tmpl_filename, context, service, alt_filename=None):
    """Render one Jinja template into the service's lib or test directory.

    Templates under a 'test' directory land in tests/..., all others in
    moto/....  Existing files are never overwritten.  *alt_filename*
    overrides the default output name (template name minus its extension).
    """
    is_test = True if 'test' in tmpl_dir else False
    rendered = jinja2.Environment(
        loader=jinja2.FileSystemLoader(tmpl_dir)
    ).get_template(tmpl_filename).render(context)
    dirname = get_test_dir(service) if is_test else get_lib_dir(service)
    filename = alt_filename or os.path.splitext(tmpl_filename)[0]
    filepath = os.path.join(dirname, filename)
    if os.path.exists(filepath):
        print_progress('skip creating', filepath, 'yellow')
    else:
        print_progress('creating', filepath, 'green')
        with open(filepath, 'w') as f:
            f.write(rendered)
def append_mock_to_init_py(service):
    """Add 'from .<escaped> import mock_<escaped>' to moto/__init__.py.

    Idempotent: returns without writing when the import already exists; the
    new line goes directly after the last existing mock import.

    Fix: the duplicate-detection regex previously matched on the raw service
    name while the inserted line uses the escaped (hyphen-free) name, so
    hyphenated services were re-inserted on every run.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', '__init__.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    escaped = get_escaped_service(service)
    if any(_ for _ in lines if re.match('^from.*mock_{}.*$'.format(escaped), _)):
        return
    filtered_lines = [_ for _ in lines if re.match('^from.*mock.*$', _)]
    last_import_line_index = lines.index(filtered_lines[-1])
    new_line = 'from .{} import mock_{} # noqa'.format(escaped, escaped)
    lines.insert(last_import_line_index + 1, new_line)
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def append_mock_import_to_backends_py(service):
    """Add 'from moto.<escaped> import <escaped>_backends' to moto/backends.py.

    Idempotent; the new line goes after the last existing backends import.

    Fix: the duplicate check must match on the escaped name actually written
    below -- matching on the raw service name made hyphenated services fail
    the check and be re-inserted on every run.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    escaped = get_escaped_service(service)
    if any(_ for _ in lines if re.match(r'^from moto\.{}.*{}_backends.*$'.format(escaped, escaped), _)):
        return
    filtered_lines = [_ for _ in lines if re.match('^from.*backends.*$', _)]
    last_import_line_index = lines.index(filtered_lines[-1])
    new_line = 'from moto.{} import {}_backends'.format(escaped, escaped)
    lines.insert(last_import_line_index + 1, new_line)
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def append_mock_dict_to_backends_py(service):
    """Register "'<service>': <escaped>_backends" in moto/backends.py's dict.

    Idempotent; appends as the last dict entry, adding a trailing comma to
    the previous entry when needed.

    Fix: the backend-name half of the duplicate check must use the escaped
    service name (the dict key keeps the raw, possibly hyphenated name);
    previously hyphenated services failed the check and were re-inserted.
    """
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', 'backends.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    escaped = get_escaped_service(service)
    if any(_ for _ in lines if re.match(".*'{}': {}_backends.*".format(service, escaped), _)):
        return
    filtered_lines = [_ for _ in lines if re.match(".*'.*':.*_backends.*", _)]
    last_elem_line_index = lines.index(filtered_lines[-1])
    new_line = " '{}': {}_backends,".format(service, escaped)
    prev_line = lines[last_elem_line_index]
    if not prev_line.endswith('{') and not prev_line.endswith(','):
        lines[last_elem_line_index] += ','
    lines.insert(last_elem_line_index + 1, new_line)
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def initialize_service(service, operation, api_protocol):
    """Create the moto/<service> and tests/test_<service> skeletons.

    Renders every template under TEMPLATE_DIR/lib and TEMPLATE_DIR/test with
    a shared context, then registers the new mock in moto's __init__ and
    backends modules.  Existing directories and files are left untouched.
    """
    lib_dir = get_lib_dir(service)
    test_dir = get_test_dir(service)
    print_progress('Initializing service', service, 'green')
    client = boto3.client(service)
    service_class = client.__class__.__name__
    endpoint_prefix = client._service_model.endpoint_prefix
    # Context shared by every rendered template.
    tmpl_context = {
        'service': service,
        'service_class': service_class,
        'endpoint_prefix': endpoint_prefix,
        'api_protocol': api_protocol,
        'escaped_service': get_escaped_service(service)
    }
    # initialize service directory
    if os.path.exists(lib_dir):
        print_progress('skip creating', lib_dir, 'yellow')
    else:
        print_progress('creating', lib_dir, 'green')
        os.makedirs(lib_dir)
    tmpl_dir = os.path.join(TEMPLATE_DIR, 'lib')
    for tmpl_filename in os.listdir(tmpl_dir):
        render_template(
            tmpl_dir, tmpl_filename, tmpl_context, service
        )
    # initialize test directory
    if os.path.exists(test_dir):
        print_progress('skip creating', test_dir, 'yellow')
    else:
        print_progress('creating', test_dir, 'green')
        os.makedirs(test_dir)
    tmpl_dir = os.path.join(TEMPLATE_DIR, 'test')
    for tmpl_filename in os.listdir(tmpl_dir):
        # The generic test template gets a service-specific output name.
        alt_filename = 'test_{}.py'.format(get_escaped_service(service)) if tmpl_filename == 'test_service.py.j2' else None
        render_template(
            tmpl_dir, tmpl_filename, tmpl_context, service, alt_filename
        )
    # append mock to init files
    append_mock_to_init_py(service)
    append_mock_import_to_backends_py(service)
    append_mock_dict_to_backends_py(service)
def to_upper_camel_case(s):
    """snake_case -> UpperCamelCase."""
    return ''.join(part.title() for part in s.split('_'))


def to_lower_camel_case(s):
    """snake_case -> lowerCamelCase (first word kept as-is)."""
    head, *tail = s.split('_')
    return head + ''.join(part.title() for part in tail)


def to_snake_case(s):
    """CamelCase (including acronyms such as 'DNSName') -> snake_case."""
    interim = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', interim).lower()
def get_function_in_responses(service, operation, protocol):
    """Generate the dispatcher method inserted into the service's Response class.

    Reads the operation's input/output shapes from botocore's bundled
    service definition and emits code that extracts each request parameter,
    calls the backend, and renders (query) or serialises (json) the result.
    Example service definition (elbv2):
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    # Some operations have no output shape at all.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    inputs = op_model.input_shape.members
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    body = '\ndef {}(self):\n'.format(operation)
    # Emit one parameter-extraction line per input member, typed by shape.
    # NOTE(review): this loop covers *all* inputs, including the paginated
    # ones filtered out of input_names above -- those become unused locals
    # in the generated code; confirm that is intentional.
    for input_name, input_type in inputs.items():
        type_name = input_type.type_name
        if type_name == 'integer':
            arg_line_tmpl = ' {} = self._get_int_param("{}")\n'
        elif type_name == 'list':
            arg_line_tmpl = ' {} = self._get_list_prefix("{}.member")\n'
        else:
            arg_line_tmpl = ' {} = self._get_param("{}")\n'
        body += arg_line_tmpl.format(to_snake_case(input_name), input_name)
    if output_names:
        body += ' {} = self.{}_backend.{}(\n'.format(', '.join(output_names), get_escaped_service(service), operation)
    else:
        body += ' self.{}_backend.{}(\n'.format(get_escaped_service(service), operation)
    for input_name in input_names:
        body += ' {}={},\n'.format(input_name, input_name)
    body += ' )\n'
    # query protocol renders an XML template; (rest-)json dumps a dict.
    if protocol == 'query':
        body += ' template = self.response_template({}_TEMPLATE)\n'.format(operation.upper())
        body += ' return template.render({})\n'.format(
            ', '.join(['{}={}'.format(_, _) for _ in output_names])
        )
    elif protocol in ['json', 'rest-json']:
        body += ' # TODO: adjust response\n'
        body += ' return json.dumps(dict({}))\n'.format(', '.join(['{}={}'.format(to_lower_camel_case(_), _) for _ in output_names]))
    return body
def get_function_in_models(service, operation):
    """Generate the model-side (backend) method stub for *operation*.

    Looks up the operation's input/output shapes in botocore's bundled
    service definition and returns the source of a skeleton method for the
    moto backend class.  Example service definition (elbv2):
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    inputs = op_model.input_shape.members
    # Some operations (e.g. delete_*) have no output shape at all.
    if not hasattr(op_model.output_shape, 'members'):
        outputs = {}
    else:
        outputs = op_model.output_shape.members
    # Pagination-related members are handled generically, not per-backend.
    input_names = [to_snake_case(_) for _ in inputs.keys() if _ not in INPUT_IGNORED_IN_BACKEND]
    output_names = [to_snake_case(_) for _ in outputs.keys() if _ not in OUTPUT_IGNORED_IN_BACKEND]
    if input_names:
        body = 'def {}(self, {}):\n'.format(operation, ', '.join(input_names))
    else:
        # Fix: this branch previously emitted the literal, unformatted string
        # 'def {}(self)' -- no operation name and no trailing colon.
        body = 'def {}(self):\n'.format(operation)
    body += ' # implement here\n'
    body += ' return {}\n\n'.format(', '.join(output_names))
    return body
def _get_subtree(name, shape, replace_list, name_prefix=None):
    """Recursively build an etree node for botocore output *shape*.

    Leaf (string) shapes become ``{{ var }}`` Jinja placeholders; list shapes
    append ``(name, prefix)`` to *replace_list* so the caller can wrap the
    emitted XML in a for-loop afterwards.

    Raises ValueError for shape classes the scaffolder does not handle.
    """
    # Fix: avoid the shared mutable default argument ([]); behaviour is
    # unchanged because the list was never mutated in place.
    if name_prefix is None:
        name_prefix = []
    class_name = shape.__class__.__name__
    if class_name in ('StringShape', 'Shape'):
        t = etree.Element(name)
        if name_prefix:
            t.text = '{{ %s.%s }}' % (name_prefix[-1], to_snake_case(name))
        else:
            t.text = '{{ %s }}' % to_snake_case(name)
        return t
    elif class_name in ('ListShape', ):
        replace_list.append((name, name_prefix))
        t = etree.Element(name)
        t_member = etree.Element('member')
        t.append(t_member)
        for nested_name, nested_shape in shape.member.members.items():
            t_member.append(_get_subtree(nested_name, nested_shape, replace_list, name_prefix + [singularize(name.lower())]))
        return t
    raise ValueError('Not supported Shape')
def get_response_query_template(service, operation):
    """Autogenerate the XML response template for a query-protocol operation.

    Reads the operation's output shape from botocore and renders a Jinja
    template string; list members are wrapped in for-loops.
    Example service definition (elbv2):
    https://github.com/boto/botocore/blob/develop/botocore/data/elbv2/2015-12-01/service-2.json
    """
    client = boto3.client(service)
    aws_operation_name = to_upper_camel_case(operation)
    op_model = client._service_model.operation_model(aws_operation_name)
    result_wrapper = op_model.output_shape.serialization['resultWrapper']
    response_wrapper = result_wrapper.replace('Result', 'Response')
    metadata = op_model.metadata
    xml_namespace = metadata['xmlNamespace']
    # build xml tree
    t_root = etree.Element(response_wrapper, xmlns=xml_namespace)
    # build metadata (static sample request id, as in other moto templates)
    t_metadata = etree.Element('ResponseMetadata')
    t_request_id = etree.Element('RequestId')
    t_request_id.text = '1549581b-12b7-11e3-895e-1334aEXAMPLE'
    t_metadata.append(t_request_id)
    t_root.append(t_metadata)
    # build result
    t_result = etree.Element(result_wrapper)
    outputs = op_model.output_shape.members
    replace_list = []
    for output_name, output_shape in outputs.items():
        t_result.append(_get_subtree(output_name, output_shape, replace_list))
    t_root.append(t_result)
    xml_body = etree.tostring(t_root, pretty_print=True).decode('utf-8')
    xml_body_lines = xml_body.splitlines()
    # Wrap every recorded list element in a Jinja for-loop.
    for replace in replace_list:
        name = replace[0]
        prefix = replace[1]
        singular_name = singularize(name)
        start_tag = '<%s>' % name
        iter_name = '{}.{}'.format(prefix[-1], name.lower()) if prefix else name.lower()
        loop_start = '{%% for %s in %s %%}' % (singular_name.lower(), iter_name)
        end_tag = '</%s>' % name
        # Fix: '{{ endfor }}' is a Jinja *expression* (it renders the
        # undefined name "endfor"); the loop must be closed with the
        # statement tag '{% endfor %}'.
        loop_end = '{% endfor %}'
        start_tag_indexes = [i for i, l in enumerate(xml_body_lines) if start_tag in l]
        if len(start_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % start_tag)
        start_tag_index = start_tag_indexes[0]
        xml_body_lines.insert(start_tag_index + 1, loop_start)
        end_tag_indexes = [i for i, l in enumerate(xml_body_lines) if end_tag in l]
        if len(end_tag_indexes) != 1:
            raise Exception('tag %s not found in response body' % end_tag)
        end_tag_index = end_tag_indexes[0]
        xml_body_lines.insert(end_tag_index, loop_end)
    xml_body = '\n'.join(xml_body_lines)
    body = '\n{}_TEMPLATE = """{}"""'.format(operation.upper(), xml_body)
    return body
def insert_code_to_class(path, base_class, new_code):
    """Append *new_code* as a method of the single subclass of *base_class*
    defined in the module at *path*, rewriting the file in place.

    Raises Exception if the module does not contain exactly one subclass.
    """
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    # Import the target module so inspect can locate the class's source span.
    mod_path = os.path.splitext(path)[0].replace('/', '.')
    mod = importlib.import_module(mod_path)
    clsmembers = inspect.getmembers(mod, inspect.isclass)
    _response_cls = [_[1] for _ in clsmembers if issubclass(_[1], base_class) and _[1] != base_class]
    if len(_response_cls) != 1:
        raise Exception('unknown error, number of clsmembers is not 1')
    response_cls = _response_cls[0]
    # getsourcelines returns (source lines, 1-based start line number).
    code_lines, line_no = inspect.getsourcelines(response_cls)
    # NOTE(review): since line_no is 1-based and `lines` is 0-based, this
    # index lands one line *after* the class's last source line -- presumably
    # intentional so insertion follows a trailing line; confirm before changing.
    end_line_no = line_no + len(code_lines)
    # Indent the generated code one level so it becomes a method body.
    func_lines = [' ' * 4 + _ for _ in new_code.splitlines()]
    lines = lines[:end_line_no] + func_lines + lines[end_line_no:]
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def insert_url(service, operation, api_protocol):
    """Register the operation's URL pattern in moto/<service>/urls.py.

    The literal '{0}' in the inserted pattern is intentionally left
    unformatted: moto's urls.py formats it with the service's URL base.
    """
    client = boto3.client(service)
    service_class = client.__class__.__name__
    aws_operation_name = to_upper_camel_case(operation)
    uri = client._service_model.operation_model(aws_operation_name).http['requestUri']
    path = os.path.join(os.path.dirname(__file__), '..', 'moto', get_escaped_service(service), 'urls.py')
    with open(path) as f:
        lines = [_.replace('\n', '') for _ in f.readlines()]
    # Already registered?  NOTE(review): *uri* is used directly as a regex, so
    # URIs containing metacharacters (e.g. '{Name}') may misbehave -- confirm.
    if any(_ for _ in lines if re.match(uri, _)):
        return
    # Locate the last entry of the url_paths dict literal.
    url_paths_found = False
    last_elem_line_index = -1
    for i, line in enumerate(lines):
        if line.startswith('url_paths'):
            url_paths_found = True
        if url_paths_found and line.startswith('}'):
            last_elem_line_index = i - 1
    # Ensure the previous entry ends with a comma before appending ours.
    prev_line = lines[last_elem_line_index]
    if not prev_line.endswith('{') and not prev_line.endswith(','):
        lines[last_elem_line_index] += ','
    # generate url pattern
    if api_protocol == 'rest-json':
        new_line = " '{0}/.*$': response.dispatch,"
    else:
        new_line = " '{0}%s$': %sResponse.dispatch," % (
            uri, service_class
        )
    if new_line in lines:
        return
    lines.insert(last_elem_line_index + 1, new_line)
    body = '\n'.join(lines) + '\n'
    with open(path, 'w') as f:
        f.write(body)
def insert_codes(service, operation, api_protocol):
    """Wire a new operation into an existing moto service.

    Generates and inserts the dispatcher method (responses.py), the backend
    stub (models.py), the XML template for query-protocol services, and the
    URL route (urls.py).
    """
    func_in_responses = get_function_in_responses(service, operation, api_protocol)
    func_in_models = get_function_in_models(service, operation)
    # edit responses.py
    responses_path = 'moto/{}/responses.py'.format(get_escaped_service(service))
    print_progress('inserting code', responses_path, 'green')
    insert_code_to_class(responses_path, BaseResponse, func_in_responses)
    # insert template (query-protocol responses render from an XML template)
    if api_protocol == 'query':
        template = get_response_query_template(service, operation)
        with open(responses_path) as f:
            lines = [_[:-1] for _ in f.readlines()]
        lines += template.splitlines()
        with open(responses_path, 'w') as f:
            f.write('\n'.join(lines))
    # edit models.py
    models_path = 'moto/{}/models.py'.format(get_escaped_service(service))
    print_progress('inserting code', models_path, 'green')
    insert_code_to_class(models_path, BaseBackend, func_in_models)
    # edit urls.py
    insert_url(service, operation, api_protocol)
@click.command()
def main():
    """Interactive scaffolder: pick a service/operation, generate the moto
    skeleton, and insert dispatcher/backend code for supported protocols."""
    service, operation = select_service_and_operation()
    api_protocol = boto3.client(service)._service_model.metadata['protocol']
    initialize_service(service, operation, api_protocol)
    if api_protocol in ['query', 'json', 'rest-json']:
        insert_codes(service, operation, api_protocol)
    else:
        print_progress('skip inserting code', 'api protocol "{}" is not supported'.format(api_protocol), 'yellow')
    click.echo('You will still need to add the mock into "__init__.py"'.format(service))
if __name__ == '__main__':
main() | 0.224565 | 0.090093 |
from selfdrive.kegman_conf import kegman_conf
class AtomConf():
    """Steering/cruise tuning parameters, seeded with defaults and then
    overridden from the kegman JSON config.

    Fix: read_tune() previously used bracket access for every key, so a
    partially written config file raised KeyError and crashed startup;
    missing keys now keep their current (default) value.
    """

    def __init__(self, CP=None):
        self.kegman = kegman_conf()
        self.tun_type = 'lqr'               # controller selection read back by read_tune()
        self.sR_KPH = [0]  # Speed kph
        self.sR_BPV = [[0,]]
        self.sR_steerRatioV = [[13.85,]]
        self.sR_ActuatorDelayV = [[0.1,]]
        self.sR_pid_KdV = [[1.0,]]
        self.sR_pid_KiV = [[0.01,]]
        self.sR_pid_KpV = [[0.15,]]
        self.sR_pid_deadzone = 0.1
        self.sR_lqr_kiV = [[0.01,]]
        self.sR_lqr_scaleV = [[2000,]]
        self.cv_KPH = [0.]  # Speed kph
        self.cv_BPV = [[200., 255.]]  # CV
        self.cv_sMaxV = [[384., 255.]]
        self.cv_sdUPV = [[3,2]]
        self.cv_sdDNV = [[7,5]]
        self.steerOffset = 0.0
        self.steerRateCost = 0.4
        self.steerLimitTimer = 0.8
        # steerActuatorDelay is never overridden by read_tune() below.
        self.steerActuatorDelay = 0.1
        self.cameraOffset = 0.05
        self.ap_autoReasume = 1
        self.ap_autoScnOffTime = 0
        self.learnerParams = 1
        self.read_tune()

    def read_tune(self):
        """Refresh every tunable from the kegman config; missing keys keep
        their current value instead of raising KeyError."""
        conf = self.kegman.read_config()
        self.learnerParams = conf.get('learnerParams', self.learnerParams)
        self.ap_autoReasume = conf.get('ap_autoReasume', self.ap_autoReasume)
        self.ap_autoScnOffTime = conf.get('ap_autoScnOffTime', self.ap_autoScnOffTime)
        self.tun_type = conf.get('tun_type', self.tun_type)
        self.sR_KPH = conf.get('sR_KPH', self.sR_KPH)
        self.sR_BPV = conf.get('sR_BPV', self.sR_BPV)
        self.sR_steerRatioV = conf.get('sR_steerRatioV', self.sR_steerRatioV)
        self.sR_ActuatorDelayV = conf.get('sR_ActuatorDelayV', self.sR_ActuatorDelayV)
        self.sR_pid_KdV = conf.get('sR_pid_KdV', self.sR_pid_KdV)
        self.sR_pid_KiV = conf.get('sR_pid_KiV', self.sR_pid_KiV)
        self.sR_pid_KpV = conf.get('sR_pid_KpV', self.sR_pid_KpV)
        self.sR_pid_deadzone = conf.get('sR_pid_deadzone', self.sR_pid_deadzone)
        self.sR_lqr_kiV = conf.get('sR_lqr_kiV', self.sR_lqr_kiV)
        self.sR_lqr_scaleV = conf.get('sR_lqr_scaleV', self.sR_lqr_scaleV)
        self.cv_KPH = conf.get('cv_KPH', self.cv_KPH)
        self.cv_BPV = conf.get('cv_BPV', self.cv_BPV)
        self.cv_sMaxV = conf.get('cv_sMaxV', self.cv_sMaxV)
        self.cv_sdUPV = conf.get('cv_sdUPV', self.cv_sdUPV)
        self.cv_sdDNV = conf.get('cv_sdDNV', self.cv_sdDNV)
        self.steerOffset = conf.get('steerOffset', self.steerOffset)
        self.steerRateCost = conf.get('steerRateCost', self.steerRateCost)
        self.steerLimitTimer = conf.get('steerLimitTimer', self.steerLimitTimer)
self.cameraOffset = conf['cameraOffset'] | selfdrive/atom_conf.py | from selfdrive.kegman_conf import kegman_conf
class AtomConf():
    """Steering/cruise tuning parameters, seeded with defaults and then
    overridden from the kegman JSON config.

    NOTE(review): read_tune() uses bracket access, so a config file missing
    any expected key raises KeyError -- confirm the kegman file is always
    written complete before relying on this at startup.
    """

    def __init__(self, CP=None):
        self.kegman = kegman_conf()
        self.tun_type = 'lqr'               # controller selection read back by read_tune()
        self.sR_KPH = [0]  # Speed kph
        self.sR_BPV = [[0,]]
        self.sR_steerRatioV = [[13.85,]]
        self.sR_ActuatorDelayV = [[0.1,]]
        self.sR_pid_KdV = [[1.0,]]
        self.sR_pid_KiV = [[0.01,]]
        self.sR_pid_KpV = [[0.15,]]
        self.sR_pid_deadzone = 0.1
        self.sR_lqr_kiV = [[0.01,]]
        self.sR_lqr_scaleV = [[2000,]]
        self.cv_KPH = [0.]  # Speed kph
        self.cv_BPV = [[200., 255.]]  # CV
        self.cv_sMaxV = [[384., 255.]]
        self.cv_sdUPV = [[3,2]]
        self.cv_sdDNV = [[7,5]]
        self.steerOffset = 0.0
        self.steerRateCost = 0.4
        self.steerLimitTimer = 0.8
        # steerActuatorDelay is never overridden by read_tune() below.
        self.steerActuatorDelay = 0.1
        self.cameraOffset = 0.05
        self.ap_autoReasume = 1
        self.ap_autoScnOffTime = 0
        self.learnerParams = 1
        self.read_tune()

    def read_tune(self):
        """Overwrite every tunable with the value from the kegman config."""
        conf = self.kegman.read_config()
        self.learnerParams = conf['learnerParams']
        self.ap_autoReasume = conf['ap_autoReasume']
        self.ap_autoScnOffTime = conf['ap_autoScnOffTime']
        self.tun_type = conf['tun_type']
        self.sR_KPH = conf['sR_KPH']
        self.sR_BPV = conf['sR_BPV']
        self.sR_steerRatioV = conf['sR_steerRatioV']
        self.sR_ActuatorDelayV = conf['sR_ActuatorDelayV']
        self.sR_pid_KdV = conf['sR_pid_KdV']
        self.sR_pid_KiV = conf['sR_pid_KiV']
        self.sR_pid_KpV = conf['sR_pid_KpV']
        self.sR_pid_deadzone = conf['sR_pid_deadzone']
        self.sR_lqr_kiV = conf['sR_lqr_kiV']
        self.sR_lqr_scaleV = conf['sR_lqr_scaleV']
        self.cv_KPH = conf['cv_KPH']
        self.cv_BPV = conf['cv_BPV']
        self.cv_sMaxV = conf['cv_sMaxV']
        self.cv_sdUPV = conf['cv_sdUPV']
        self.cv_sdDNV = conf['cv_sdDNV']
        self.steerOffset = conf['steerOffset']
        self.steerRateCost = conf['steerRateCost']
        self.steerLimitTimer = conf['steerLimitTimer']
self.cameraOffset = conf['cameraOffset'] | 0.309337 | 0.09122 |
def part1(input_str: str) -> None:
    """AoC 2021 day 3 part 1: derive gamma/epsilon from per-position bit
    majorities and print the resulting power consumption."""
    numbers: list[str] = input_str.split('\n')
    place_counts: dict[int, dict[int, int]] = get_place_counts(numbers)
    gamma_rate = ''
    epsilon_rate = ''
    # Fix idiom: the key was unused; insertion order of place_counts already
    # follows bit position 0..n-1, so iterating values() is equivalent.
    for v in place_counts.values():
        # Majority bit goes to gamma, the complement to epsilon.
        if v.get(0, 0) > v.get(1, 0):
            gamma_rate += '0'
            epsilon_rate += '1'
        else:
            gamma_rate += '1'
            epsilon_rate += '0'
    gamma = int(gamma_rate, 2)
    epsilon = int(epsilon_rate, 2)
    print(f'Gamma: {gamma} [{gamma_rate}] Epsilon: {epsilon} [{epsilon_rate}]')
    power_consumption = gamma * epsilon
    print(f'Power Consumption: {power_consumption}')
def part2(input_str: str) -> None:
    """AoC 2021 day 3 part 2: compute and print the life support rating.

    The O2 generator rating keeps the most common bit at each position
    (ties keep '1'); the CO2 scrubber rating keeps the least common bit
    (ties keep '0').  Both filters are one call to _filter_rating, which
    replaces the two near-identical loops of the original.
    """
    numbers: list[str] = input_str.split('\n')
    oxygen_generator_rating = _filter_rating(numbers, keep_most_common=True)
    co2_scrubber_rating = _filter_rating(numbers, keep_most_common=False)
    o2_rating = int(oxygen_generator_rating, 2)
    co2_rating = int(co2_scrubber_rating, 2)
    print(f'O2 Rating: {o2_rating} [{oxygen_generator_rating}] CO2 Rating {co2_rating} [{co2_scrubber_rating}]')
    life_support_rating = o2_rating * co2_rating
    print(f'Power Consumption: {life_support_rating}')


def _filter_rating(numbers: list[str], keep_most_common: bool) -> str:
    """Filter *numbers* position by position until one candidate remains."""
    candidates = list(numbers)
    prefix = ''
    place = 0
    while len(candidates) != 1:
        counts = get_place_counts(candidates).get(place)
        zeros = counts.get(0, 0)
        ones = counts.get(1, 0)
        if keep_most_common:
            # ties keep '1'
            prefix += '0' if zeros > ones else '1'
        else:
            # ties keep '0'
            prefix += '0' if zeros <= ones else '1'
        candidates = [x for x in candidates if x.startswith(prefix)]
        place += 1
    return candidates[0]
def get_place_counts(lines: list[str]) -> dict[int, dict[int, int]]:
    """Tally how many 0s and 1s appear at each bit position across *lines*.

    Returns {position: {0: zero_count, 1: one_count}}; an entry (possibly
    empty) is created for every position seen, but only '0'/'1' characters
    are counted.
    """
    tallies: dict[int, dict[int, int]] = {}
    for raw in lines:
        for position, char in enumerate(raw.strip()):
            bucket = tallies.setdefault(position, {})
            if char == '0':
                bucket[0] = bucket.get(0, 0) + 1
            elif char == '1':
                bucket[1] = bucket.get(1, 0) + 1
    return tallies
if __name__ == '__main__':
# test_string = ('\n').join(['00100', '11110', '10110', '10111', '10101', '01111', '00111', '11100', '10000', '11001', '00010', '01010'])
# part1(test_string)
# part2(test_string)
with open('../../resources/2021/inputd3.txt', 'r') as f:
test_input = f.read()
part1(test_input)
part2(test_input) | python/2021/aoc2021day3.py | def part1(input_str: str) -> None:
numbers: list[str] = input_str.split('\n')
place_counts:dict[int, dict[int, int]] = get_place_counts(numbers)
gamma_rate = ''
epsilon_rate = ''
for k, v in place_counts.items():
if v.get(0, 0) > v.get(1, 0):
gamma_rate += '0'
epsilon_rate += '1'
else:
gamma_rate += '1'
epsilon_rate += '0'
gamma = int(gamma_rate, 2)
epsilon = int(epsilon_rate, 2)
print(f'Gamma: {gamma} [{gamma_rate}] Epsilon: {epsilon} [{epsilon_rate}]')
power_consumption = gamma * epsilon
print(f'Power Consumption: {power_consumption}')
def part2(input_str: str) -> None:
    """AoC 2021 day 3 part 2: filter candidates bit by bit to find the O2
    generator and CO2 scrubber ratings, then print the life support rating."""
    numbers: list[str] = input_str.split('\n')
    prefix = ''
    place = 0
    # O2 generator: keep the most common bit at each position (ties keep '1').
    while len(numbers) != 1:
        place_counts: dict[int, dict[int, int]] = get_place_counts(numbers)
        counts = place_counts.get(place)
        if counts.get(0, 0) > counts.get(1, 0):
            prefix += '0'
        elif counts.get(0, 0) < counts.get(1, 0):
            prefix += '1'
        else:
            prefix += '1'
        numbers = [x for x in numbers if x.startswith(prefix)]
        place += 1
    oxygen_generator_rating = numbers[0]
    numbers: list[str] = input_str.split('\n')
    prefix = ''
    place = 0
    # CO2 scrubber: keep the least common bit at each position (ties keep '0').
    while len(numbers) != 1:
        place_counts: dict[int, dict[int, int]] = get_place_counts(numbers)
        counts = place_counts.get(place)
        if counts.get(0, 0) < counts.get(1, 0):
            prefix += '0'
        elif counts.get(0, 0) > counts.get(1, 0):
            prefix += '1'
        else:
            prefix += '0'
        numbers = [x for x in numbers if x.startswith(prefix)]
        place += 1
    co2_scrubber_rating = numbers[0]
    o2_rating = int(oxygen_generator_rating, 2)
    co2_rating = int(co2_scrubber_rating, 2)
    print(f'O2 Rating: {o2_rating} [{oxygen_generator_rating}] CO2 Rating {co2_rating} [{co2_scrubber_rating}]')
    life_support_rating = o2_rating * co2_rating
    print(f'Power Consumption: {life_support_rating}')
def get_place_counts(lines: list[str]) -> dict[int, dict[int, int]]:
    """Count the 0s and 1s at each bit position across *lines*.

    Returns {position: {0: zero_count, 1: one_count}}; an entry (possibly
    empty) is created for every position seen, but only '0'/'1' characters
    are counted.
    """
    place_counts: dict[int, dict[int, int]] = {}
    for line in lines:
        # enumerate + setdefault replace the manual index/get/update churn
        # of the original; behaviour (including empty entries for non-binary
        # characters) is preserved.
        for place, bit in enumerate(line.strip()):
            count = place_counts.setdefault(place, {})
            if bit in '01':
                key = int(bit)
                count[key] = count.get(key, 0) + 1
    return place_counts
if __name__ == '__main__':
# test_string = ('\n').join(['00100', '11110', '10110', '10111', '10101', '01111', '00111', '11100', '10000', '11001', '00010', '01010'])
# part1(test_string)
# part2(test_string)
with open('../../resources/2021/inputd3.txt', 'r') as f:
test_input = f.read()
part1(test_input)
part2(test_input) | 0.278159 | 0.414425 |
from torchsummary import summary
from torch import nn
import torch.nn.functional as F
class SEBlock(nn.Module):
    """Squeeze-and-Excitation: channel-wise gating via global pooling + MLP."""

    def __init__(self, in_channels, reduction=16):
        super().__init__()
        # "Squeeze": collapse each feature map to a single scalar.
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # "Excitation": bottleneck MLP producing per-channel gates in (0, 1).
        self.fc = nn.Sequential(
            nn.Linear(in_channels, in_channels // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(in_channels // reduction, in_channels, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        batch, channels = x.size(0), x.size(1)
        squeezed = self.avg_pool(x).view(batch, channels)
        gates = self.fc(squeezed).view(batch, channels, 1, 1)
        # Rescale every channel of the input by its learned gate.
        return x * gates.expand_as(x)
class SEResNeXtUnit(nn.Module):
    """One SE-ResNeXt bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand ->
    SE recalibration, merged with an identity/projection shortcut.

    Args:
        in_channels: channels entering the block.
        mid_channels: bottleneck width (split into *cardinality* groups).
        out_channels: channels produced by the block.
        stride: stride of the grouped 3x3 conv (2 = spatial downsampling).
        cardinality: number of groups in the 3x3 conv (ResNeXt "paths").
    """
    def __init__(self, in_channels, mid_channels, out_channels, stride, cardinality=32):
        super().__init__()
        self.conv1x1_1 = nn.Conv2d(
            in_channels, mid_channels, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(mid_channels)
        # Grouped convolution implements the multi-branch ResNeXt transform.
        self.conv3x3 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3,
            stride=stride, groups=cardinality, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(mid_channels)
        self.conv1x1_2 = nn.Conv2d(
            mid_channels, out_channels, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(out_channels)
        self.seBlock = SEBlock(out_channels)
        self.bn4 = nn.BatchNorm2d(out_channels)
        # Project the shortcut (1x1 conv, carrying the stride) only when the
        # channel count changes.  NOTE(review): a strided unit with equal
        # in/out channels would shape-mismatch, but SEResNeXt.stage() never
        # builds that combination here.
        self.shortcut = nn.Sequential()
        if in_channels != out_channels:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1,
                    stride=stride, bias=False),
                nn.BatchNorm2d(out_channels)
            )

    def forward(self, input):
        x = input
        x = self.conv1x1_1(x)
        x = F.relu(self.bn1(x), inplace=True)
        x = self.conv3x3(x)
        x = F.relu(self.bn2(x), inplace=True)
        x = self.conv1x1_2(x)
        x = F.relu(self.bn3(x), inplace=True)
        x = self.seBlock(x)
        # Residual add, then a final BN + ReLU on the merged path.
        return F.relu(self.bn4(x + self.shortcut(input)), inplace=True)
class SEResNeXt(nn.Module):
def __init__(self, units):
super().__init__()
self.conv7x7 = nn.Conv2d(3, 64, 7, stride=2, padding=3)
self.bn0 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stage_1 = self.stage(64, 128, 256, units[0])
self.stage_2 = self.stage(256, 256, 512, units[1], 2)
self.stage_3 = self.stage(512, 512, 1024, units[2], 2)
self.stage_4 = self.stage(1024, 1024, 2048, units[3], 2)
self.gap = nn.AdaptiveAvgPool2d(1)
self.dense = nn.Linear(2048, 1000)
def forward(self, input):
x = input
x = self.conv7x7(x)
x = F.relu(self.bn0(x), inplace=True)
x = self.maxpool(x)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.stage_4(x)
x = self.gap(x)
x = x.view(-1, 2048)
x = self.dense(x)
return x
@staticmethod
def stage(in_channels, mid_channels, out_channels, units, stride=1):
layers = [SEResNeXtUnit(
in_channels, mid_channels, out_channels, stride)]
for _ in range(1, units):
layers.append(SEResNeXtUnit(
out_channels, mid_channels, out_channels, stride=1))
layers = tuple(layers)
return nn.Sequential(*layers)
if __name__ == '__main__':
device = "cpu"
SEResNeXt50 = SEResNeXt([3, 4, 6, 3])
summary(SEResNeXt50, (3, 224, 224), device=device)
SEResNeXt101 = SEResNeXt([3, 4, 23, 3])
summary(SEResNeXt101, (3, 224, 224), device=device) | SEResNeXt/SEResNeXt_pytorch.py | from torchsummary import summary
from torch import nn
import torch.nn.functional as F
class SEBlock(nn.Module):
def __init__(self, in_channels, reduction=16):
super().__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(in_channels, in_channels // reduction, bias=False),
nn.ReLU(inplace=True),
nn.Linear(in_channels // reduction, in_channels, bias=False),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y.expand_as(x)
class SEResNeXtUnit(nn.Module):
def __init__(self, in_channels, mid_channels, out_channels, stride, cardinality=32):
super().__init__()
self.conv1x1_1 = nn.Conv2d(
in_channels, mid_channels, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(mid_channels)
self.conv3x3 = nn.Conv2d(mid_channels, mid_channels, kernel_size=3,
stride=stride, groups=cardinality, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(mid_channels)
self.conv1x1_2 = nn.Conv2d(
mid_channels, out_channels, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(out_channels)
self.seBlock = SEBlock(out_channels)
self.bn4 = nn.BatchNorm2d(out_channels)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1,
stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, input):
x = input
x = self.conv1x1_1(x)
x = F.relu(self.bn1(x), inplace=True)
x = self.conv3x3(x)
x = F.relu(self.bn2(x), inplace=True)
x = self.conv1x1_2(x)
x = F.relu(self.bn3(x), inplace=True)
x = self.seBlock(x)
return F.relu(self.bn4(x + self.shortcut(input)), inplace=True)
class SEResNeXt(nn.Module):
def __init__(self, units):
super().__init__()
self.conv7x7 = nn.Conv2d(3, 64, 7, stride=2, padding=3)
self.bn0 = nn.BatchNorm2d(64)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.stage_1 = self.stage(64, 128, 256, units[0])
self.stage_2 = self.stage(256, 256, 512, units[1], 2)
self.stage_3 = self.stage(512, 512, 1024, units[2], 2)
self.stage_4 = self.stage(1024, 1024, 2048, units[3], 2)
self.gap = nn.AdaptiveAvgPool2d(1)
self.dense = nn.Linear(2048, 1000)
def forward(self, input):
x = input
x = self.conv7x7(x)
x = F.relu(self.bn0(x), inplace=True)
x = self.maxpool(x)
x = self.stage_1(x)
x = self.stage_2(x)
x = self.stage_3(x)
x = self.stage_4(x)
x = self.gap(x)
x = x.view(-1, 2048)
x = self.dense(x)
return x
@staticmethod
def stage(in_channels, mid_channels, out_channels, units, stride=1):
layers = [SEResNeXtUnit(
in_channels, mid_channels, out_channels, stride)]
for _ in range(1, units):
layers.append(SEResNeXtUnit(
out_channels, mid_channels, out_channels, stride=1))
layers = tuple(layers)
return nn.Sequential(*layers)
if __name__ == '__main__':
device = "cpu"
SEResNeXt50 = SEResNeXt([3, 4, 6, 3])
summary(SEResNeXt50, (3, 224, 224), device=device)
SEResNeXt101 = SEResNeXt([3, 4, 23, 3])
summary(SEResNeXt101, (3, 224, 224), device=device) | 0.971712 | 0.510619 |
import time, re, csv, os
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
runList=[]
def connect(driver):
driver.get("https://www.strava.com/login")
driver.find_element_by_id("email").send_keys("<EMAIL>")
driver.find_element_by_id("password").send_keys("<PASSWORD>")
driver.find_element_by_id("login-button").click()
def getdata(user, rundate, driver):
userUrl = "https://www.strava.com/athletes/" + user + "#interval?interval=" + rundate + "&interval_type=month&chart_type=miles&year_offset=0"
driver.get(userUrl)
def getrun(url, driver):
res = [['time', 'distance', 'elevation', 'pace', 'heartrate', 'cadence']]
action = ActionChains(driver)
driver.get(url)
time.sleep(1)
try:
driver.find_element_by_xpath("//td[@data-type='heartrate']/div[@class='toggle-button']").click()
except:
return []
try:
enough = webDriver.find_element_by_xpath("//ul[@class='inline-stats section']/li/strong")
except:
return []
if (float(re.search('^(.+?)<abbr', enough.get_attribute('innerHTML')).group(1).replace(',','.')) < 5):
return[]
driver.find_element_by_xpath("//td[@data-type='cadence']/div[@class='toggle-button']").click()
grid = driver.find_element_by_id("grid")
action.move_to_element(grid).perform()
action.move_by_offset(-398, 0).perform()
for i in range(266):
action.move_by_offset(3, 0).perform()
timev = driver.find_element_by_xpath("//*[@id='crossBar']/*[@class='crossbar-text']").text
distance = driver.find_element_by_xpath("//*[@id='infobox-text-distance']/*[@class='value']").text
elev = driver.find_element_by_xpath("//*[@id='infobox-text-altitude']/*[@class='value']").text
pace = driver.find_element_by_xpath("//*[@id='infobox-text-pace']/*[@class='value']").text
heartrate = driver.find_element_by_xpath("//*[@id='infobox-text-heartrate']/*[@class='value']").text
cadence = driver.find_element_by_xpath("//*[@id='infobox-text-cadence']/*[@class='value']").text
res.append([timev, distance, elev, pace, heartrate, cadence])
action = ActionChains(driver)
time.sleep(1)
return res
def saverun(rundata, user, run):
if not os.path.isdir('./data'):
os.mkdir('./data')
if not os.path.isdir('./data/' + user):
os.mkdir('./data/' + user)
if not os.path.isfile('./data/' + user + '/' + run):
script_dir = os.path.dirname(__file__) # Script directory
full_path = os.path.join(script_dir, './data/' + user + '/' + run + '.csv')
with open(full_path, 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(rundata)
csvFile.close()
if __name__ == "__main__":
userId = str(input("Enter user id :"))
year = str(input("Enter year :"))
month = str(input("Enter month (2 digits):"))
date = year + month
webDriver = webdriver.Chrome()
webDriver.maximize_window()
connect(webDriver)
time.sleep(1)
getdata(userId, date, webDriver)
time.sleep(2)
for div in webDriver.find_elements_by_class_name("activity-title"):
try:
div.find_element_by_class_name("icon-run")
except:
continue
text = div.get_attribute('innerHTML')
found = re.search('href="(.+?)"', text).group(1)
runList.append(found)
for run in runList:
runUrl = "https://www.strava.com" + run
runId = re.search('activities/(.+?)$', run).group(1)
if os.path.isfile('./data/' + userId + '/' + runId + '.csv'):
continue
data = getrun(runUrl,webDriver)
if not data:
continue
saverun(data, userId, runId)
webDriver.close() | web_scraping/onemonth.py | import time, re, csv, os
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains
runList=[]
def connect(driver):
driver.get("https://www.strava.com/login")
driver.find_element_by_id("email").send_keys("<EMAIL>")
driver.find_element_by_id("password").send_keys("<PASSWORD>")
driver.find_element_by_id("login-button").click()
def getdata(user, rundate, driver):
userUrl = "https://www.strava.com/athletes/" + user + "#interval?interval=" + rundate + "&interval_type=month&chart_type=miles&year_offset=0"
driver.get(userUrl)
def getrun(url, driver):
res = [['time', 'distance', 'elevation', 'pace', 'heartrate', 'cadence']]
action = ActionChains(driver)
driver.get(url)
time.sleep(1)
try:
driver.find_element_by_xpath("//td[@data-type='heartrate']/div[@class='toggle-button']").click()
except:
return []
try:
enough = webDriver.find_element_by_xpath("//ul[@class='inline-stats section']/li/strong")
except:
return []
if (float(re.search('^(.+?)<abbr', enough.get_attribute('innerHTML')).group(1).replace(',','.')) < 5):
return[]
driver.find_element_by_xpath("//td[@data-type='cadence']/div[@class='toggle-button']").click()
grid = driver.find_element_by_id("grid")
action.move_to_element(grid).perform()
action.move_by_offset(-398, 0).perform()
for i in range(266):
action.move_by_offset(3, 0).perform()
timev = driver.find_element_by_xpath("//*[@id='crossBar']/*[@class='crossbar-text']").text
distance = driver.find_element_by_xpath("//*[@id='infobox-text-distance']/*[@class='value']").text
elev = driver.find_element_by_xpath("//*[@id='infobox-text-altitude']/*[@class='value']").text
pace = driver.find_element_by_xpath("//*[@id='infobox-text-pace']/*[@class='value']").text
heartrate = driver.find_element_by_xpath("//*[@id='infobox-text-heartrate']/*[@class='value']").text
cadence = driver.find_element_by_xpath("//*[@id='infobox-text-cadence']/*[@class='value']").text
res.append([timev, distance, elev, pace, heartrate, cadence])
action = ActionChains(driver)
time.sleep(1)
return res
def saverun(rundata, user, run):
if not os.path.isdir('./data'):
os.mkdir('./data')
if not os.path.isdir('./data/' + user):
os.mkdir('./data/' + user)
if not os.path.isfile('./data/' + user + '/' + run):
script_dir = os.path.dirname(__file__) # Script directory
full_path = os.path.join(script_dir, './data/' + user + '/' + run + '.csv')
with open(full_path, 'w') as csvFile:
writer = csv.writer(csvFile)
writer.writerows(rundata)
csvFile.close()
if __name__ == "__main__":
userId = str(input("Enter user id :"))
year = str(input("Enter year :"))
month = str(input("Enter month (2 digits):"))
date = year + month
webDriver = webdriver.Chrome()
webDriver.maximize_window()
connect(webDriver)
time.sleep(1)
getdata(userId, date, webDriver)
time.sleep(2)
for div in webDriver.find_elements_by_class_name("activity-title"):
try:
div.find_element_by_class_name("icon-run")
except:
continue
text = div.get_attribute('innerHTML')
found = re.search('href="(.+?)"', text).group(1)
runList.append(found)
for run in runList:
runUrl = "https://www.strava.com" + run
runId = re.search('activities/(.+?)$', run).group(1)
if os.path.isfile('./data/' + userId + '/' + runId + '.csv'):
continue
data = getrun(runUrl,webDriver)
if not data:
continue
saverun(data, userId, runId)
webDriver.close() | 0.080919 | 0.069415 |
from file import BankAccountFileWriter
class BankAccount:
def __init__(self, account_number, name, password, value, admin):
self.account_number = account_number
self.name = name
self.password = password
self.value = value
self.admin = admin
def check_account_number(self, account_number):
return account_number == self.account_number
def check_password(self, password):
return password == self.password
def balance_debit(self, value):
self.value -= value
def __check_availability_to_withdraw(self, value):
if self.value < value:
return False
return True
class CashMachineInsertMoneyBill:
@staticmethod
def insert_money_bill(money_bill, amount):
cash_machine = CashMachineGetter().get()
if money_bill not in cash_machine.money_slips.keys():
cash_machine.money_slips[money_bill] = 0
cash_machine.money_slips[money_bill] += amount
from file import MoneySlipsFileWriter
MoneySlipsFileWriter().write_money_slips(cash_machine.money_slips)
return cash_machine
class CashMachineWithdraw:
@staticmethod
def withdraw(bank_account, value):
account_balance_availability = CashMachineWithdraw.__check_availability_to_withdraw(bank_account, value)
cash_machine = CashMachineGetter().get()
money_slips_user = cash_machine.withdraw(value)
if money_slips_user and account_balance_availability:
CashMachineWithdraw.__balance_debit(bank_account, value)
from file import MoneySlipsFileWriter
MoneySlipsFileWriter().write_money_slips(cash_machine.money_slips)
return cash_machine, account_balance_availability
@staticmethod
def __check_availability_to_withdraw(bank_account, value):
if bank_account.value < value:
return False
return True
@staticmethod
def __balance_debit(bank_account, value):
bank_account.balance_debit(value)
BankAccountFileWriter().write_bank_account(bank_account)
class CashMachineGetter:
def get(self):
from file import MoneySlipsFileReader
money_slips = MoneySlipsFileReader().get_money_slips()
return CashMachine(money_slips)
class CashMachine:
def __init__(self, money_slips):
self.money_slips = money_slips
self.money_slips_user = {}
self.value_remaining = 0
self.sorted_money_slips = {}
def withdraw(self, value):
self.value_remaining = value
self.__calculate_money_slips_user()
if self.value_remaining == 0:
self.__decrease_money_slips()
return False if self.value_remaining != 0 else self.money_slips
def __calculate_money_slips_user(self):
self.__sort_money_slips()
for money_bill in self.sorted_money_slips.keys():
if self.sorted_money_slips[money_bill] >= self.value_remaining // money_bill > 0:
self.money_slips_user[money_bill] = self.value_remaining // money_bill
self.value_remaining -= self.money_slips_user[money_bill] * money_bill
def __sort_money_slips(self):
for elements in sorted(self.money_slips.items(), reverse=True):
self.sorted_money_slips[elements[0]] = elements[1]
def __decrease_money_slips(self):
for money_bill in self.money_slips_user:
self.money_slips[money_bill] -= self.money_slips_user[money_bill] | cash_machine.py | from file import BankAccountFileWriter
class BankAccount:
def __init__(self, account_number, name, password, value, admin):
self.account_number = account_number
self.name = name
self.password = password
self.value = value
self.admin = admin
def check_account_number(self, account_number):
return account_number == self.account_number
def check_password(self, password):
return password == self.password
def balance_debit(self, value):
self.value -= value
def __check_availability_to_withdraw(self, value):
if self.value < value:
return False
return True
class CashMachineInsertMoneyBill:
@staticmethod
def insert_money_bill(money_bill, amount):
cash_machine = CashMachineGetter().get()
if money_bill not in cash_machine.money_slips.keys():
cash_machine.money_slips[money_bill] = 0
cash_machine.money_slips[money_bill] += amount
from file import MoneySlipsFileWriter
MoneySlipsFileWriter().write_money_slips(cash_machine.money_slips)
return cash_machine
class CashMachineWithdraw:
@staticmethod
def withdraw(bank_account, value):
account_balance_availability = CashMachineWithdraw.__check_availability_to_withdraw(bank_account, value)
cash_machine = CashMachineGetter().get()
money_slips_user = cash_machine.withdraw(value)
if money_slips_user and account_balance_availability:
CashMachineWithdraw.__balance_debit(bank_account, value)
from file import MoneySlipsFileWriter
MoneySlipsFileWriter().write_money_slips(cash_machine.money_slips)
return cash_machine, account_balance_availability
@staticmethod
def __check_availability_to_withdraw(bank_account, value):
if bank_account.value < value:
return False
return True
@staticmethod
def __balance_debit(bank_account, value):
bank_account.balance_debit(value)
BankAccountFileWriter().write_bank_account(bank_account)
class CashMachineGetter:
def get(self):
from file import MoneySlipsFileReader
money_slips = MoneySlipsFileReader().get_money_slips()
return CashMachine(money_slips)
class CashMachine:
def __init__(self, money_slips):
self.money_slips = money_slips
self.money_slips_user = {}
self.value_remaining = 0
self.sorted_money_slips = {}
def withdraw(self, value):
self.value_remaining = value
self.__calculate_money_slips_user()
if self.value_remaining == 0:
self.__decrease_money_slips()
return False if self.value_remaining != 0 else self.money_slips
def __calculate_money_slips_user(self):
self.__sort_money_slips()
for money_bill in self.sorted_money_slips.keys():
if self.sorted_money_slips[money_bill] >= self.value_remaining // money_bill > 0:
self.money_slips_user[money_bill] = self.value_remaining // money_bill
self.value_remaining -= self.money_slips_user[money_bill] * money_bill
def __sort_money_slips(self):
for elements in sorted(self.money_slips.items(), reverse=True):
self.sorted_money_slips[elements[0]] = elements[1]
def __decrease_money_slips(self):
for money_bill in self.money_slips_user:
self.money_slips[money_bill] -= self.money_slips_user[money_bill] | 0.572842 | 0.247726 |
import abc
import logging
import time
import warnings
from enum import Enum
from typing import Any
from dateutil.parser import parse
from great_expectations.core import ExpectationSuite, RunIdentifier
from great_expectations.exceptions import GreatExpectationsError
from ..data_asset import DataAsset
from ..dataset import Dataset
logger = logging.getLogger(__name__)
class ProfilerDataType(Enum):
"""Useful data types for building profilers."""
INT = "int"
FLOAT = "float"
STRING = "string"
BOOLEAN = "boolean"
DATETIME = "datetime"
UNKNOWN = "unknown"
class ProfilerCardinality(Enum):
"""Useful cardinality categories for building profilers."""
NONE = "none"
ONE = "one"
TWO = "two"
FEW = "few"
VERY_FEW = "very few"
MANY = "many"
VERY_MANY = "very many"
UNIQUE = "unique"
class ProfilerTypeMapping:
"""Useful backend type mapping for building profilers."""
# Future support possibility: JSON (RECORD)
# Future support possibility: BINARY (BYTES)
INT_TYPE_NAMES = [
"INTEGER",
"integer",
"int",
"INT",
"TINYINT",
"BYTEINT",
"SMALLINT",
"BIGINT",
"IntegerType",
"LongType",
"DECIMAL",
]
FLOAT_TYPE_NAMES = [
"FLOAT",
"DOUBLE",
"FLOAT4",
"FLOAT8",
"DOUBLE_PRECISION",
"NUMERIC",
"FloatType",
"DoubleType",
"float",
"number",
]
STRING_TYPE_NAMES = [
"CHAR",
"VARCHAR",
"NVARCHAR",
"TEXT",
"STRING",
"StringType",
"string",
"str",
]
BOOLEAN_TYPE_NAMES = [
"BOOLEAN",
"boolean",
"BOOL",
"TINYINT",
"BIT",
"bool",
"BooleanType",
]
DATETIME_TYPE_NAMES = [
"DATETIME",
"DATE",
"TIME",
"TIMESTAMP",
"DateType",
"TimestampType",
"datetime64",
"Timestamp",
]
class Profiler(object, metaclass=abc.ABCMeta):
"""
Profilers creates suites from various sources of truth.
These sources of truth can be data or non-data sources such as DDLs.
When implementing a Profiler ensure that you:
- Implement a . _profile() method
- Optionally implement .validate() method that verifies you are running on the right
kind of object. You should raise an appropriate Exception if the object is not valid.
"""
def __init__(self, configuration: dict = None):
self.configuration = configuration
def validate(self, item_to_validate: Any) -> None:
pass
def profile(self, item_to_profile: Any, suite_name: str = None) -> ExpectationSuite:
self.validate(item_to_profile)
expectation_suite = self._profile(item_to_profile, suite_name=suite_name)
return expectation_suite
@abc.abstractmethod
def _profile(
self, item_to_profile: Any, suite_name: str = None
) -> ExpectationSuite:
pass
class DataAssetProfiler(object):
@classmethod
def validate(cls, data_asset):
return isinstance(data_asset, DataAsset)
class DatasetProfiler(DataAssetProfiler):
@classmethod
def validate(cls, dataset):
return isinstance(dataset, Dataset)
@classmethod
def add_expectation_meta(cls, expectation):
expectation.meta[str(cls.__name__)] = {"confidence": "very low"}
return expectation
@classmethod
def add_meta(cls, expectation_suite, batch_kwargs=None):
class_name = str(cls.__name__)
expectation_suite.meta[class_name] = {
"created_by": class_name,
"created_at": time.time(),
}
if batch_kwargs is not None:
expectation_suite.meta[class_name]["batch_kwargs"] = batch_kwargs
new_expectations = [
cls.add_expectation_meta(exp) for exp in expectation_suite.expectations
]
expectation_suite.expectations = new_expectations
if "notes" not in expectation_suite.meta:
expectation_suite.meta["notes"] = {
"format": "markdown",
"content": [
"_To add additional notes, edit the <code>meta.notes.content</code> field in the appropriate Expectation json file._"
# TODO: be more helpful to the user by piping in the filename.
# This will require a minor refactor to make more DataContext information accessible from this method.
# "_To add additional notes, edit the <code>meta.notes.content</code> field in <code>expectations/mydb/default/movies/BasicDatasetProfiler.json</code>_"
],
}
return expectation_suite
@classmethod
def profile(
cls,
data_asset,
run_id=None,
profiler_configuration=None,
run_name=None,
run_time=None,
):
assert not (run_id and run_name) and not (
run_id and run_time
), "Please provide either a run_id or run_name and/or run_time."
if isinstance(run_id, str) and not run_name:
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional). Instead of providing a run_id, you may also provide"
"run_name and run_time separately.",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif not isinstance(run_id, RunIdentifier):
run_name = run_name or "profiling"
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
if not cls.validate(data_asset):
raise GreatExpectationsError("Invalid data_asset for profiler; aborting")
expectation_suite = cls._profile(
data_asset, configuration=profiler_configuration
)
batch_kwargs = data_asset.batch_kwargs
expectation_suite = cls.add_meta(expectation_suite, batch_kwargs)
validation_results = data_asset.validate(
expectation_suite, run_id=run_id, result_format="SUMMARY"
)
expectation_suite.add_citation(
comment=str(cls.__name__) + " added a citation based on the current batch.",
batch_kwargs=data_asset.batch_kwargs,
batch_markers=data_asset.batch_markers,
batch_parameters=data_asset.batch_parameters,
)
return expectation_suite, validation_results
@classmethod
def _profile(cls, dataset, configuration=None):
raise NotImplementedError | great_expectations/profile/base.py | import abc
import logging
import time
import warnings
from enum import Enum
from typing import Any
from dateutil.parser import parse
from great_expectations.core import ExpectationSuite, RunIdentifier
from great_expectations.exceptions import GreatExpectationsError
from ..data_asset import DataAsset
from ..dataset import Dataset
logger = logging.getLogger(__name__)
class ProfilerDataType(Enum):
"""Useful data types for building profilers."""
INT = "int"
FLOAT = "float"
STRING = "string"
BOOLEAN = "boolean"
DATETIME = "datetime"
UNKNOWN = "unknown"
class ProfilerCardinality(Enum):
"""Useful cardinality categories for building profilers."""
NONE = "none"
ONE = "one"
TWO = "two"
FEW = "few"
VERY_FEW = "very few"
MANY = "many"
VERY_MANY = "very many"
UNIQUE = "unique"
class ProfilerTypeMapping:
"""Useful backend type mapping for building profilers."""
# Future support possibility: JSON (RECORD)
# Future support possibility: BINARY (BYTES)
INT_TYPE_NAMES = [
"INTEGER",
"integer",
"int",
"INT",
"TINYINT",
"BYTEINT",
"SMALLINT",
"BIGINT",
"IntegerType",
"LongType",
"DECIMAL",
]
FLOAT_TYPE_NAMES = [
"FLOAT",
"DOUBLE",
"FLOAT4",
"FLOAT8",
"DOUBLE_PRECISION",
"NUMERIC",
"FloatType",
"DoubleType",
"float",
"number",
]
STRING_TYPE_NAMES = [
"CHAR",
"VARCHAR",
"NVARCHAR",
"TEXT",
"STRING",
"StringType",
"string",
"str",
]
BOOLEAN_TYPE_NAMES = [
"BOOLEAN",
"boolean",
"BOOL",
"TINYINT",
"BIT",
"bool",
"BooleanType",
]
DATETIME_TYPE_NAMES = [
"DATETIME",
"DATE",
"TIME",
"TIMESTAMP",
"DateType",
"TimestampType",
"datetime64",
"Timestamp",
]
class Profiler(object, metaclass=abc.ABCMeta):
"""
Profilers creates suites from various sources of truth.
These sources of truth can be data or non-data sources such as DDLs.
When implementing a Profiler ensure that you:
- Implement a . _profile() method
- Optionally implement .validate() method that verifies you are running on the right
kind of object. You should raise an appropriate Exception if the object is not valid.
"""
def __init__(self, configuration: dict = None):
self.configuration = configuration
def validate(self, item_to_validate: Any) -> None:
pass
def profile(self, item_to_profile: Any, suite_name: str = None) -> ExpectationSuite:
self.validate(item_to_profile)
expectation_suite = self._profile(item_to_profile, suite_name=suite_name)
return expectation_suite
@abc.abstractmethod
def _profile(
self, item_to_profile: Any, suite_name: str = None
) -> ExpectationSuite:
pass
class DataAssetProfiler(object):
@classmethod
def validate(cls, data_asset):
return isinstance(data_asset, DataAsset)
class DatasetProfiler(DataAssetProfiler):
@classmethod
def validate(cls, dataset):
return isinstance(dataset, Dataset)
@classmethod
def add_expectation_meta(cls, expectation):
expectation.meta[str(cls.__name__)] = {"confidence": "very low"}
return expectation
@classmethod
def add_meta(cls, expectation_suite, batch_kwargs=None):
class_name = str(cls.__name__)
expectation_suite.meta[class_name] = {
"created_by": class_name,
"created_at": time.time(),
}
if batch_kwargs is not None:
expectation_suite.meta[class_name]["batch_kwargs"] = batch_kwargs
new_expectations = [
cls.add_expectation_meta(exp) for exp in expectation_suite.expectations
]
expectation_suite.expectations = new_expectations
if "notes" not in expectation_suite.meta:
expectation_suite.meta["notes"] = {
"format": "markdown",
"content": [
"_To add additional notes, edit the <code>meta.notes.content</code> field in the appropriate Expectation json file._"
# TODO: be more helpful to the user by piping in the filename.
# This will require a minor refactor to make more DataContext information accessible from this method.
# "_To add additional notes, edit the <code>meta.notes.content</code> field in <code>expectations/mydb/default/movies/BasicDatasetProfiler.json</code>_"
],
}
return expectation_suite
@classmethod
def profile(
cls,
data_asset,
run_id=None,
profiler_configuration=None,
run_name=None,
run_time=None,
):
assert not (run_id and run_name) and not (
run_id and run_time
), "Please provide either a run_id or run_name and/or run_time."
if isinstance(run_id, str) and not run_name:
warnings.warn(
"String run_ids will be deprecated in the future. Please provide a run_id of type "
"RunIdentifier(run_name=None, run_time=None), or a dictionary containing run_name "
"and run_time (both optional). Instead of providing a run_id, you may also provide"
"run_name and run_time separately.",
DeprecationWarning,
)
try:
run_time = parse(run_id)
except (ValueError, TypeError):
pass
run_id = RunIdentifier(run_name=run_id, run_time=run_time)
elif isinstance(run_id, dict):
run_id = RunIdentifier(**run_id)
elif not isinstance(run_id, RunIdentifier):
run_name = run_name or "profiling"
run_id = RunIdentifier(run_name=run_name, run_time=run_time)
if not cls.validate(data_asset):
raise GreatExpectationsError("Invalid data_asset for profiler; aborting")
expectation_suite = cls._profile(
data_asset, configuration=profiler_configuration
)
batch_kwargs = data_asset.batch_kwargs
expectation_suite = cls.add_meta(expectation_suite, batch_kwargs)
validation_results = data_asset.validate(
expectation_suite, run_id=run_id, result_format="SUMMARY"
)
expectation_suite.add_citation(
comment=str(cls.__name__) + " added a citation based on the current batch.",
batch_kwargs=data_asset.batch_kwargs,
batch_markers=data_asset.batch_markers,
batch_parameters=data_asset.batch_parameters,
)
return expectation_suite, validation_results
@classmethod
def _profile(cls, dataset, configuration=None):
raise NotImplementedError | 0.757615 | 0.300386 |
import os
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..utils import logger
from ..autotuning import Autotuner
DLTS_HOSTFILE = "/job/hostfile"
EXPORT_ENVS = ["NCCL", "PYTHON", "MV2", "UCX"]
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
PDSH_MAX_FAN_OUT = 1024
def parse_args(args=None):
parser = argparse.ArgumentParser(
description="DeepSpeed runner to help launch distributed "
"multi-node/multi-gpu training jobs.")
parser.add_argument("-H",
"--hostfile",
type=str,
default=DLTS_HOSTFILE,
help="Hostfile path (in MPI style) that defines the "
"resource pool available to the job (e.g., "
"worker-0 slots=4)")
parser.add_argument("-i",
"--include",
type=str,
default="",
help='''Specify hardware resources to use during execution.
String format is
NODE_SPEC[@NODE_SPEC ...],
where
NODE_SPEC=NAME[:SLOT[,SLOT ...]].
If :SLOT is omitted, include all slots on that host.
Example: -i "worker-0@worker-1:0,2" will use all slots
on worker-0 and slots [0, 2] on worker-1.
''')
parser.add_argument("-e",
"--exclude",
type=str,
default="",
help='''Specify hardware resources to NOT use during execution.
Mutually exclusive with --include. Resource formatting
is the same as --include.
Example: -e "worker-1:0" will use all available
resources except slot 0 on worker-1.
''')
parser.add_argument("--num_nodes",
type=int,
default=-1,
help="Total number of worker nodes to run on, this will use "
"the top N hosts from the given hostfile.")
parser.add_argument("--num_gpus",
type=int,
default=-1,
help="Max number of GPUs to use on each node, will use "
"[0:N) GPU ids on each node.")
parser.add_argument("--master_port",
default=TORCH_DISTRIBUTED_DEFAULT_PORT,
type=int,
help="(optional) Port used by PyTorch distributed for "
"communication during training.")
parser.add_argument("--master_addr",
default="",
type=str,
help="(optional) IP address of node 0, will be "
"inferred via 'hostname -I' if not specified.")
parser.add_argument("--launcher",
default=PDSH_LAUNCHER,
type=str,
help="(optional) choose launcher backend for multi-node "
"training. Options currently include PDSH, OpenMPI, MVAPICH.")
parser.add_argument("--launcher_args",
default="",
type=str,
help="(optional) pass launcher specific arguments as a "
"single quoted argument.")
parser.add_argument("--module",
action="store_true",
help="Change each process to interpret the launch "
"script as a Python module, executing with the same "
"behavior as 'python -m'.")
parser.add_argument("--no_python",
action="store_true",
help="Skip prepending the training script with "
"'python' - just execute it directly.")
parser.add_argument("--no_local_rank",
action="store_true",
help="Do not pass local_rank as an argument when calling "
"the user's training script.")
parser.add_argument("--no_ssh_check",
action="store_true",
help="Do not perform ssh check in multi-node launcher model")
parser.add_argument("--force_multi",
action="store_true",
help="Force multi-node launcher mode, helps in cases where user "
"wants to launch on single remote node.")
parser.add_argument(
"--save_pid",
action="store_true",
help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
"where <main-pid> is the pid of the first process that invoked `deepspeed`. "
"Useful when launching deepspeed processes programmatically.")
parser.add_argument(
"--autotuning",
default="",
choices=["tune",
"run"],
type=str,
help="Run DeepSpeed autotuner to discover optimal configuration parameters "
"before running job.")
parser.add_argument("user_script",
type=str,
help="User script to launch, followed by any required "
"arguments.")
parser.add_argument('user_args', nargs=argparse.REMAINDER)
return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
    """Parse an MPI-style hostfile into an ordered {hostname: slot_count} map.

    Each non-empty line must look like ``worker-0 slots=16``. Returns None
    (after a warning) when the file does not exist, so the caller can fall
    back to local-only training. Raises ValueError on malformed lines or
    duplicate hostnames.
    """
    if not os.path.isfile(hostfile_path):
        logger.warning("Unable to find hostfile, will proceed with training "
                       "with local resources only.")
        return None
    resource_pool = collections.OrderedDict()
    with open(hostfile_path, 'r') as hostfile:
        for raw_line in hostfile:
            entry = raw_line.strip()
            if not entry:
                # blank lines are permitted and ignored
                continue
            try:
                # e.g., "worker-0 slots=16"
                hostname, slot_spec = entry.split()
                _, count_text = slot_spec.split("=")
                slot_count = int(count_text)
            except ValueError as err:
                logger.error("Hostfile is not formatted correctly, unable to "
                             "proceed with training.")
                raise err
            if hostname in resource_pool:
                logger.error("Hostfile contains duplicate hosts, unable to "
                             "proceed with training.")
                raise ValueError(f"host {hostname} is already defined")
            resource_pool[hostname] = slot_count
    return resource_pool
def _stable_remove_duplicates(data):
# Create a new list in the same order as original but with duplicates
# removed, should never be more than ~16 elements so simple is best
new_list = []
for x in data:
if x not in new_list:
new_list.append(x)
return new_list
def parse_resource_filter(host_info, include_str="", exclude_str=""):
    '''Parse an inclusion or exclusion string and filter a hostfile dictionary.
    String format is NODE_SPEC[@NODE_SPEC ...], where
        NODE_SPEC = NAME[:SLOT[,SLOT ...]].
    If :SLOT is omitted, include/exclude all slots on that host.
    Examples:
        include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
          slots [0, 2] on worker-1.
        exclude_str="worker-1:0" will use all available resources except
          slot 0 on worker-1.
    '''
    # Constants that define our syntax
    NODE_SEP = '@'
    SLOT_LIST_START = ':'
    SLOT_SEP = ','
    # Ensure include/exclude are mutually exclusive
    if (include_str != "") and (exclude_str != ""):
        raise ValueError('include_str and exclude_str are mutually exclusive.')
    # no-op
    if (include_str == "") and (exclude_str == ""):
        return host_info
    # Either build from scratch or remove items
    # - include mode: start empty and add only what is listed
    # - exclude mode: start from a deep copy of everything and remove what is
    #   listed (deepcopy so the caller's host_info slot lists are not mutated)
    filtered_hosts = dict()
    if include_str:
        parse_str = include_str
    if exclude_str != "":
        filtered_hosts = deepcopy(host_info)
        parse_str = exclude_str
    # foreach node in the list
    for node_config in parse_str.split(NODE_SEP):
        # Node can either be alone or node:slot,slot,slot
        if SLOT_LIST_START in node_config:
            hostname, slots = node_config.split(SLOT_LIST_START)
            slots = [int(x) for x in slots.split(SLOT_SEP)]
            # sanity checks
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            for slot in slots:
                if slot not in host_info[hostname]:
                    raise ValueError(f"No slot '{slot}' specified on host '{hostname}'")
            # If include string, build the list from here
            if include_str:
                filtered_hosts[hostname] = slots
            elif exclude_str:
                for slot in slots:
                    logger.info(f'removing {slot} from {hostname}')
                    filtered_hosts[hostname].remove(slot)
        # User just specified the whole node
        else:
            hostname = node_config
            # sanity check hostname
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            if include_str:
                filtered_hosts[hostname] = host_info[hostname]
            elif exclude_str:
                # excluding a whole node: empty slot list, pruned below
                filtered_hosts[hostname] = []
    # Post-processing to remove duplicates and empty nodes
    del_keys = []
    for hostname in filtered_hosts:
        # Remove duplicates
        filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname])
        # Remove empty hosts
        if len(filtered_hosts[hostname]) == 0:
            del_keys.append(hostname)
    for name in del_keys:
        del filtered_hosts[name]
    # Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
    # we map ranks to nodes correctly by maintaining host_info ordering.
    ordered_hosts = collections.OrderedDict()
    for host in host_info:
        if host in filtered_hosts:
            ordered_hosts[host] = filtered_hosts[host]
    return ordered_hosts
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
    """Expand a {host: slot_count} pool into {host: [slot ids]} and apply
    the --include/--exclude filter strings via parse_resource_filter."""
    expanded = collections.OrderedDict(
        (hostname, list(range(slot_count)))
        for hostname, slot_count in resource_pool.items())
    return parse_resource_filter(expanded,
                                 include_str=inclusion,
                                 exclude_str=exclusion)
def encode_world_info(world_info):
    """Serialize *world_info* to JSON and return it URL-safe base64 encoded,
    suitable for passing on a command line."""
    as_json = json.dumps(world_info)
    return base64.urlsafe_b64encode(as_json.encode('utf-8')).decode('utf-8')
def run_autotuning(args, active_resources):
    """Run the DeepSpeed autotuner over *active_resources*; when the user
    passed --autotuning=run, also launch training with the best config."""
    should_launch_after = args.autotuning == "run"
    autotuner = Autotuner(args, active_resources)
    logger.info("[Start] Running autotuning")
    autotuner.tune()
    autotuner.print_tuning_results()
    logger.info("[End] Running autotuning")
    if should_launch_after:
        autotuner.run_after_tuning()
def main(args=None):
    """Entry point for the `deepspeed` launcher CLI.

    Resolves the resource pool (hostfile, CUDA_VISIBLE_DEVICES, or the local
    GPU count), applies --include/--exclude/--num_nodes/--num_gpus filters,
    then either runs the autotuner or spawns the job via the single-node
    launcher module or a multi-node runner backend (PDSH/OpenMPI/MVAPICH).
    Exits with the subprocess's return code on failure.
    """
    args = parse_args(args)
    resource_pool = fetch_hostfile(args.hostfile)
    # respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters
    cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    if not resource_pool and len(cuda_visible_devices):
        detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}"
        if len(args.include) or len(
                args.exclude) or args.num_nodes > 1 or args.num_gpus > 0:
            # explicit resource flags take precedence over the env var
            print(
                f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed."
            )
        else:
            # translate the env var into an equivalent --include spec
            args.include = f"localhost:{cuda_visible_devices}"
            print(f"{detected_str}: setting --include={args.include}")
        del os.environ["CUDA_VISIBLE_DEVICES"]
    if args.num_nodes >= 0 or args.num_gpus >= 0:
        if args.include != "" or args.exclude != "":
            raise ValueError("Cannot specify num_nodes/gpus with include/exclude")
    multi_node_exec = True
    if not resource_pool:
        # no hostfile: fall back to every GPU on the local machine
        resource_pool = {}
        device_count = torch.cuda.device_count()
        if device_count == 0:
            raise RuntimeError("Unable to proceed, no GPU resources available")
        resource_pool['localhost'] = device_count
        args.master_addr = "127.0.0.1"
        multi_node_exec = False
    if not multi_node_exec and args.num_nodes > 1:
        raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")
    active_resources = parse_inclusion_exclusion(resource_pool,
                                                 args.include,
                                                 args.exclude)
    env = os.environ.copy()
    # validate that passwordless-ssh is working properly with this hostfile
    if multi_node_exec and not args.no_ssh_check:
        first_host = list(active_resources.keys())[0]
        try:
            subprocess.check_call(
                f'ssh -o PasswordAuthentication=no {first_host} hostname',
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL,
                shell=True)
        except subprocess.CalledProcessError:
            raise RuntimeError(
                f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
            )
    if not args.master_addr:
        assert multi_node_exec
        # infer the master address from the first node's `hostname -I`
        first_host = list(active_resources.keys())[0]
        hostname_cmd = [f"ssh {first_host} hostname -I"]
        result = subprocess.check_output(hostname_cmd, shell=True)
        args.master_addr = result.decode('utf-8').split()[0]
        logger.info(f"Using IP address of {args.master_addr} for node {first_host}")
    if args.autotuning != "":
        run_autotuning(args, active_resources)
        return
    # truncate to the first --num_nodes hosts, preserving hostfile order
    if args.num_nodes > 0:
        updated_active_resources = collections.OrderedDict()
        for count, hostname in enumerate(active_resources.keys()):
            if args.num_nodes == count:
                break
            updated_active_resources[hostname] = active_resources[hostname]
        active_resources = updated_active_resources
    # cap each host at --num_gpus slots [0, num_gpus)
    if args.num_gpus > 0:
        updated_active_resources = collections.OrderedDict()
        for hostname in active_resources.keys():
            updated_active_resources[hostname] = list(range(args.num_gpus))
        active_resources = updated_active_resources
    # encode world info as base64 to make it easier to pass via command line
    world_info_base64 = encode_world_info(active_resources)
    multi_node_exec = args.force_multi or len(active_resources) > 1
    if not multi_node_exec:
        # single node: launch the per-GPU process spawner directly
        deepspeed_launch = [
            sys.executable,
            "-u",
            "-m",
            "deepspeed.launcher.launch",
            f"--world_info={world_info_base64}",
            f"--master_addr={args.master_addr}",
            f"--master_port={args.master_port}"
        ]
        if args.no_python:
            deepspeed_launch.append("--no_python")
        if args.module:
            deepspeed_launch.append("--module")
        if args.no_local_rank:
            deepspeed_launch.append("--no_local_rank")
        if args.save_pid:
            deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
        cmd = deepspeed_launch + [args.user_script] + args.user_args
    else:
        args.launcher = args.launcher.lower()
        if args.launcher == PDSH_LAUNCHER:
            runner = PDSHRunner(args, world_info_base64)
        elif args.launcher == OPENMPI_LAUNCHER:
            runner = OpenMPIRunner(args, world_info_base64, resource_pool)
        elif args.launcher == MVAPICH_LAUNCHER:
            runner = MVAPICHRunner(args, world_info_base64, resource_pool)
        else:
            raise NotImplementedError(f"Unknown launcher {args.launcher}")
        if not runner.backend_exists():
            raise RuntimeError(f"launcher '{args.launcher}' not installed.")
        curr_path = os.path.abspath('.')
        if 'PYTHONPATH' in env:
            env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
        else:
            env['PYTHONPATH'] = curr_path
        # NOTE(review): 'exports' is assigned but never used — dead local
        exports = ""
        # forward whitelisted env vars (NCCL*/PYTHON*/MV2*/UCX*) to remotes
        for var in env.keys():
            if any([var.startswith(name) for name in EXPORT_ENVS]):
                runner.add_export(var, env[var])
        # plus any KEY=VALUE overrides from ~/.deepspeed_env or ./.deepspeed_env
        for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
            environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
            if os.path.isfile(environ_file):
                with open(environ_file, 'r') as fd:
                    for var in fd.readlines():
                        key, val = var.split('=', maxsplit=1)
                        runner.add_export(key, val)
        cmd = runner.get_cmd(env, active_resources)
    logger.info(f"cmd = {' '.join(cmd)}")
    result = subprocess.Popen(cmd, env=env)
    result.wait()
    # In case of failure must propagate the error-condition back to the caller (usually shell). The
    # actual error and traceback should have been printed in the subprocess, so in order to avoid
    # unnecessary noise we just quietly exit here with the same code as the subprocess
    if result.returncode > 0:
        sys.exit(result.returncode)
if __name__ == "__main__":
main() | deepspeed/launcher/runner.py | import os
import sys
import json
import base64
import argparse
import subprocess
import collections
from copy import deepcopy
import torch.cuda
from .multinode_runner import PDSHRunner, OpenMPIRunner, MVAPICHRunner
from .constants import PDSH_LAUNCHER, OPENMPI_LAUNCHER, MVAPICH_LAUNCHER
from ..constants import TORCH_DISTRIBUTED_DEFAULT_PORT
from ..utils import logger
from ..autotuning import Autotuner
# Default hostfile location (path used on DLTS clusters).
DLTS_HOSTFILE = "/job/hostfile"
# Env-var name prefixes that get forwarded to remote training processes.
EXPORT_ENVS = ["NCCL", "PYTHON", "MV2", "UCX"]
# Optional per-user / per-cwd file holding extra KEY=VALUE exports.
DEEPSPEED_ENVIRONMENT_NAME = ".deepspeed_env"
DEEPSPEED_ENVIRONMENT_PATHS = [os.path.expanduser("~"), '.']
PDSH_MAX_FAN_OUT = 1024
def parse_args(args=None):
    """Build and run the argument parser for the `deepspeed` launcher CLI.

    Args:
        args: optional list of argument strings; defaults to sys.argv[1:].

    Returns:
        The parsed argparse.Namespace. Everything after the user script is
        captured verbatim into `user_args` via argparse.REMAINDER.
    """
    parser = argparse.ArgumentParser(
        description="DeepSpeed runner to help launch distributed "
        "multi-node/multi-gpu training jobs.")
    parser.add_argument("-H",
                        "--hostfile",
                        type=str,
                        default=DLTS_HOSTFILE,
                        help="Hostfile path (in MPI style) that defines the "
                        "resource pool available to the job (e.g., "
                        "worker-0 slots=4)")
    parser.add_argument("-i",
                        "--include",
                        type=str,
                        default="",
                        help='''Specify hardware resources to use during execution.
                        String format is
                                NODE_SPEC[@NODE_SPEC ...],
                        where
                                NODE_SPEC=NAME[:SLOT[,SLOT ...]].
                        If :SLOT is omitted, include all slots on that host.
                        Example: -i "worker-0@worker-1:0,2" will use all slots
                        on worker-0 and slots [0, 2] on worker-1.
                        ''')
    parser.add_argument("-e",
                        "--exclude",
                        type=str,
                        default="",
                        help='''Specify hardware resources to NOT use during execution.
                        Mutually exclusive with --include. Resource formatting
                        is the same as --include.
                        Example: -e "worker-1:0" will use all available
                        resources except slot 0 on worker-1.
                        ''')
    # num_nodes/num_gpus default to -1 meaning "not specified"; main() only
    # applies them when >= 0.
    parser.add_argument("--num_nodes",
                        type=int,
                        default=-1,
                        help="Total number of worker nodes to run on, this will use "
                        "the top N hosts from the given hostfile.")
    parser.add_argument("--num_gpus",
                        type=int,
                        default=-1,
                        help="Max number of GPUs to use on each node, will use "
                        "[0:N) GPU ids on each node.")
    parser.add_argument("--master_port",
                        default=TORCH_DISTRIBUTED_DEFAULT_PORT,
                        type=int,
                        help="(optional) Port used by PyTorch distributed for "
                        "communication during training.")
    parser.add_argument("--master_addr",
                        default="",
                        type=str,
                        help="(optional) IP address of node 0, will be "
                        "inferred via 'hostname -I' if not specified.")
    parser.add_argument("--launcher",
                        default=PDSH_LAUNCHER,
                        type=str,
                        help="(optional) choose launcher backend for multi-node "
                        "training. Options currently include PDSH, OpenMPI, MVAPICH.")
    parser.add_argument("--launcher_args",
                        default="",
                        type=str,
                        help="(optional) pass launcher specific arguments as a "
                        "single quoted argument.")
    parser.add_argument("--module",
                        action="store_true",
                        help="Change each process to interpret the launch "
                        "script as a Python module, executing with the same "
                        "behavior as 'python -m'.")
    parser.add_argument("--no_python",
                        action="store_true",
                        help="Skip prepending the training script with "
                        "'python' - just execute it directly.")
    parser.add_argument("--no_local_rank",
                        action="store_true",
                        help="Do not pass local_rank as an argument when calling "
                        "the user's training script.")
    parser.add_argument("--no_ssh_check",
                        action="store_true",
                        help="Do not perform ssh check in multi-node launcher model")
    parser.add_argument("--force_multi",
                        action="store_true",
                        help="Force multi-node launcher mode, helps in cases where user "
                        "wants to launch on single remote node.")
    parser.add_argument(
        "--save_pid",
        action="store_true",
        help="Save file containing launcher process id (pid) at /tmp/<main-pid>.ds, "
        "where <main-pid> is the pid of the first process that invoked `deepspeed`. "
        "Useful when launching deepspeed processes programmatically.")
    parser.add_argument(
        "--autotuning",
        default="",
        choices=["tune",
                 "run"],
        type=str,
        help="Run DeepSpeed autotuner to discover optimal configuration parameters "
        "before running job.")
    # positional: the training script plus all of its own arguments
    parser.add_argument("user_script",
                        type=str,
                        help="User script to launch, followed by any required "
                        "arguments.")
    parser.add_argument('user_args', nargs=argparse.REMAINDER)
    return parser.parse_args(args=args)
def fetch_hostfile(hostfile_path):
    """Read an MPI-style hostfile into an OrderedDict of {hostname: slot_count}.

    Returns None (after a warning) when the file does not exist, so the
    caller can fall back to local-only resources. Raises ValueError on
    malformed lines or duplicate hostnames.
    """
    if not os.path.isfile(hostfile_path):
        logger.warning("Unable to find hostfile, will proceed with training "
                       "with local resources only.")
        return None
    # e.g., worker-0 slots=16
    with open(hostfile_path, 'r') as fd:
        resource_pool = collections.OrderedDict()
        for line in fd.readlines():
            line = line.strip()
            if line == '':
                # skip empty lines
                continue
            try:
                # "worker-0 slots=16" -> hostname="worker-0", slot_count=16
                hostname, slots = line.split()
                _, slot_count = slots.split("=")
                slot_count = int(slot_count)
            except ValueError as err:
                logger.error("Hostfile is not formatted correctly, unable to "
                             "proceed with training.")
                raise err
            if hostname in resource_pool:
                logger.error("Hostfile contains duplicate hosts, unable to "
                             "proceed with training.")
                raise ValueError(f"host {hostname} is already defined")
            resource_pool[hostname] = slot_count
    return resource_pool
def _stable_remove_duplicates(data):
    """Return *data* minus duplicates, preserving first-seen order."""
    # Create a new list in the same order as original but with duplicates
    # removed, should never be more than ~16 elements so simple is best
    new_list = []
    for x in data:
        if x not in new_list:
            new_list.append(x)
    return new_list
def parse_resource_filter(host_info, include_str="", exclude_str=""):
    '''Parse an inclusion or exclusion string and filter a hostfile dictionary.
    String format is NODE_SPEC[@NODE_SPEC ...], where
        NODE_SPEC = NAME[:SLOT[,SLOT ...]].
    If :SLOT is omitted, include/exclude all slots on that host.
    Examples:
        include_str="worker-0@worker-1:0,2" will use all slots on worker-0 and
          slots [0, 2] on worker-1.
        exclude_str="worker-1:0" will use all available resources except
          slot 0 on worker-1.
    '''
    # Constants that define our syntax
    NODE_SEP = '@'
    SLOT_LIST_START = ':'
    SLOT_SEP = ','
    # Ensure include/exclude are mutually exclusive
    if (include_str != "") and (exclude_str != ""):
        raise ValueError('include_str and exclude_str are mutually exclusive.')
    # no-op
    if (include_str == "") and (exclude_str == ""):
        return host_info
    # Either build from scratch or remove items:
    # include mode starts empty; exclude mode starts from a deep copy of
    # host_info (deepcopy so the caller's slot lists are never mutated).
    filtered_hosts = dict()
    if include_str:
        parse_str = include_str
    if exclude_str != "":
        filtered_hosts = deepcopy(host_info)
        parse_str = exclude_str
    # foreach node in the list
    for node_config in parse_str.split(NODE_SEP):
        # Node can either be alone or node:slot,slot,slot
        if SLOT_LIST_START in node_config:
            hostname, slots = node_config.split(SLOT_LIST_START)
            slots = [int(x) for x in slots.split(SLOT_SEP)]
            # sanity checks
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            for slot in slots:
                if slot not in host_info[hostname]:
                    raise ValueError(f"No slot '{slot}' specified on host '{hostname}'")
            # If include string, build the list from here
            if include_str:
                filtered_hosts[hostname] = slots
            elif exclude_str:
                for slot in slots:
                    logger.info(f'removing {slot} from {hostname}')
                    filtered_hosts[hostname].remove(slot)
        # User just specified the whole node
        else:
            hostname = node_config
            # sanity check hostname
            if hostname not in host_info:
                raise ValueError(f"Hostname '{hostname}' not found in hostfile")
            if include_str:
                filtered_hosts[hostname] = host_info[hostname]
            elif exclude_str:
                # excluding a whole node: empty slot list, pruned below
                filtered_hosts[hostname] = []
    # Post-processing to remove duplicates and empty nodes
    del_keys = []
    for hostname in filtered_hosts:
        # Remove duplicates
        filtered_hosts[hostname] = _stable_remove_duplicates(filtered_hosts[hostname])
        # Remove empty hosts
        if len(filtered_hosts[hostname]) == 0:
            del_keys.append(hostname)
    for name in del_keys:
        del filtered_hosts[name]
    # Lastly, go over filtered_hosts and convert to a OrderedDict() to ensure
    # we map ranks to nodes correctly by maintaining host_info ordering.
    ordered_hosts = collections.OrderedDict()
    for host in host_info:
        if host in filtered_hosts:
            ordered_hosts[host] = filtered_hosts[host]
    return ordered_hosts
def parse_inclusion_exclusion(resource_pool, inclusion, exclusion):
    """Expand {host: slot_count} into {host: [0..slot_count-1]} and apply the
    --include/--exclude filter strings via parse_resource_filter."""
    active_resources = collections.OrderedDict()
    for hostname, slots in resource_pool.items():
        active_resources[hostname] = list(range(slots))
    return parse_resource_filter(active_resources,
                                 include_str=inclusion,
                                 exclude_str=exclusion)
def encode_world_info(world_info):
    """JSON-serialize *world_info* and return it as URL-safe base64 text."""
    payload = json.dumps(world_info).encode('utf-8')
    return base64.urlsafe_b64encode(payload).decode('utf-8')
def run_autotuning(args, active_resources):
    """Run the DeepSpeed Autotuner over *active_resources*; when the user
    passed --autotuning=run, also launch training with the best config."""
    tuner = Autotuner(args, active_resources)
    logger.info("[Start] Running autotuning")
    tuner.tune()
    tuner.print_tuning_results()
    logger.info("[End] Running autotuning")
    if args.autotuning == "run":
        tuner.run_after_tuning()
def main(args=None):
    """Entry point for the `deepspeed` launcher CLI.

    Resolves the resource pool (hostfile, CUDA_VISIBLE_DEVICES, or the local
    GPU count), applies --include/--exclude/--num_nodes/--num_gpus filters,
    then either runs the autotuner or spawns the training job via the
    single-node launcher module or a multi-node runner backend
    (PDSH/OpenMPI/MVAPICH). Exits with the subprocess's return code on
    failure.
    """
    args = parse_args(args)
    resource_pool = fetch_hostfile(args.hostfile)
    # respect CUDA_VISIBLE_DEVICES for a single node and no explicit resource filters
    cuda_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES", "")
    if not resource_pool and len(cuda_visible_devices):
        detected_str = f"Detected CUDA_VISIBLE_DEVICES={cuda_visible_devices}"
        if len(args.include) or len(
                args.exclude) or args.num_nodes > 1 or args.num_gpus > 0:
            # explicit resource flags take precedence over the env var
            print(
                f"{detected_str} but ignoring it because one or several of --include/--exclude/--num_gpus/--num_nodes cl args were used. If you want to use CUDA_VISIBLE_DEVICES don't pass any of these arguments to deepspeed."
            )
        else:
            # translate the env var into an equivalent --include spec
            args.include = f"localhost:{cuda_visible_devices}"
            print(f"{detected_str}: setting --include={args.include}")
        del os.environ["CUDA_VISIBLE_DEVICES"]
    if args.num_nodes >= 0 or args.num_gpus >= 0:
        if args.include != "" or args.exclude != "":
            raise ValueError("Cannot specify num_nodes/gpus with include/exclude")
    multi_node_exec = True
    if not resource_pool:
        # no hostfile: fall back to every GPU on the local machine
        resource_pool = {}
        device_count = torch.cuda.device_count()
        if device_count == 0:
            raise RuntimeError("Unable to proceed, no GPU resources available")
        resource_pool['localhost'] = device_count
        args.master_addr = "127.0.0.1"
        multi_node_exec = False
    if not multi_node_exec and args.num_nodes > 1:
        raise ValueError("Num nodes is >1 but no extra nodes available via hostfile")
    active_resources = parse_inclusion_exclusion(resource_pool,
                                                 args.include,
                                                 args.exclude)
    env = os.environ.copy()
    # validate that passwordless-ssh is working properly with this hostfile
    if multi_node_exec and not args.no_ssh_check:
        first_host = list(active_resources.keys())[0]
        try:
            subprocess.check_call(
                f'ssh -o PasswordAuthentication=no {first_host} hostname',
                stderr=subprocess.DEVNULL,
                stdout=subprocess.DEVNULL,
                shell=True)
        except subprocess.CalledProcessError:
            raise RuntimeError(
                f"Using hostfile at {args.hostfile} but host={first_host} was not reachable via ssh. If you are running with a single node please remove {args.hostfile} or setup passwordless ssh."
            )
    if not args.master_addr:
        assert multi_node_exec
        # infer the master address from the first node's `hostname -I`
        first_host = list(active_resources.keys())[0]
        hostname_cmd = [f"ssh {first_host} hostname -I"]
        result = subprocess.check_output(hostname_cmd, shell=True)
        args.master_addr = result.decode('utf-8').split()[0]
        logger.info(f"Using IP address of {args.master_addr} for node {first_host}")
    if args.autotuning != "":
        run_autotuning(args, active_resources)
        return
    # truncate to the first --num_nodes hosts, preserving hostfile order
    if args.num_nodes > 0:
        updated_active_resources = collections.OrderedDict()
        for count, hostname in enumerate(active_resources.keys()):
            if args.num_nodes == count:
                break
            updated_active_resources[hostname] = active_resources[hostname]
        active_resources = updated_active_resources
    # cap each host at --num_gpus slots [0, num_gpus)
    if args.num_gpus > 0:
        updated_active_resources = collections.OrderedDict()
        for hostname in active_resources.keys():
            updated_active_resources[hostname] = list(range(args.num_gpus))
        active_resources = updated_active_resources
    # encode world info as base64 to make it easier to pass via command line
    world_info_base64 = encode_world_info(active_resources)
    multi_node_exec = args.force_multi or len(active_resources) > 1
    if not multi_node_exec:
        # single node: launch the per-GPU process spawner directly
        deepspeed_launch = [
            sys.executable,
            "-u",
            "-m",
            "deepspeed.launcher.launch",
            f"--world_info={world_info_base64}",
            f"--master_addr={args.master_addr}",
            f"--master_port={args.master_port}"
        ]
        if args.no_python:
            deepspeed_launch.append("--no_python")
        if args.module:
            deepspeed_launch.append("--module")
        if args.no_local_rank:
            deepspeed_launch.append("--no_local_rank")
        if args.save_pid:
            deepspeed_launch += ["--save_pid", f"{os.getpid()}"]
        cmd = deepspeed_launch + [args.user_script] + args.user_args
    else:
        args.launcher = args.launcher.lower()
        if args.launcher == PDSH_LAUNCHER:
            runner = PDSHRunner(args, world_info_base64)
        elif args.launcher == OPENMPI_LAUNCHER:
            runner = OpenMPIRunner(args, world_info_base64, resource_pool)
        elif args.launcher == MVAPICH_LAUNCHER:
            runner = MVAPICHRunner(args, world_info_base64, resource_pool)
        else:
            raise NotImplementedError(f"Unknown launcher {args.launcher}")
        if not runner.backend_exists():
            raise RuntimeError(f"launcher '{args.launcher}' not installed.")
        curr_path = os.path.abspath('.')
        if 'PYTHONPATH' in env:
            env['PYTHONPATH'] = curr_path + ":" + env['PYTHONPATH']
        else:
            env['PYTHONPATH'] = curr_path
        # (removed dead local `exports = ""` — it was never read)
        # forward whitelisted env vars (NCCL*/PYTHON*/MV2*/UCX*) to remotes
        for var in env.keys():
            if any([var.startswith(name) for name in EXPORT_ENVS]):
                runner.add_export(var, env[var])
        # plus any KEY=VALUE overrides from ~/.deepspeed_env or ./.deepspeed_env
        for environ_path in DEEPSPEED_ENVIRONMENT_PATHS:
            environ_file = os.path.join(environ_path, DEEPSPEED_ENVIRONMENT_NAME)
            if os.path.isfile(environ_file):
                with open(environ_file, 'r') as fd:
                    for var in fd.readlines():
                        key, val = var.split('=', maxsplit=1)
                        runner.add_export(key, val)
        cmd = runner.get_cmd(env, active_resources)
    logger.info(f"cmd = {' '.join(cmd)}")
    result = subprocess.Popen(cmd, env=env)
    result.wait()
    # In case of failure must propagate the error-condition back to the caller (usually shell). The
    # actual error and traceback should have been printed in the subprocess, so in order to avoid
    # unnecessary noise we just quietly exit here with the same code as the subprocess
    if result.returncode > 0:
        sys.exit(result.returncode)
if __name__ == "__main__":
main() | 0.452536 | 0.083516 |
from __future__ import print_function
import unittest
import numpy as np
import os
import sys
import six
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.fluid as fluid
from paddle.fluid.optimizer import Adam
import paddle.fluid.framework as framework
from test_imperative_base import new_program_scope
from paddle.optimizer.lr import LRScheduler
# Fixture sizes for the synthetic training data used by these tests.
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
SEED = 10
IMAGE_SIZE = 784
CLASS_NUM = 10
# Width of the weight in LayerWithLargeParameters: kept tiny on Python 2
# (which lacks pickle protocol 4), ~64M elements on Python 3 so the large
# save/load path is actually exercised.
if six.PY2:
    LARGE_PARAM = 2**2
else:
    LARGE_PARAM = 2**26
def random_batch_reader():
    """Return a reader that yields BATCH_NUM (image, label) tensor batches.

    The numpy RNG is reseeded with SEED on every batch, so each yielded
    batch is identical and the data is fully deterministic.
    """

    def _make_batch():
        # reseed per call -> deterministic, repeated batch contents
        np.random.seed(SEED)
        image = np.random.random([BATCH_SIZE, IMAGE_SIZE]).astype('float32')
        label = np.random.randint(0, CLASS_NUM - 1, (
            BATCH_SIZE,
            1, )).astype('int64')
        return image, label

    def __reader__():
        for _ in range(BATCH_NUM):
            image, label = _make_batch()
            yield paddle.to_tensor(image), paddle.to_tensor(label)

    return __reader__
class LinearNet(nn.Layer):
    """Minimal one-layer network mapping IMAGE_SIZE features to CLASS_NUM logits."""

    def __init__(self):
        super(LinearNet, self).__init__()
        # single fully-connected layer, no activation
        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

    def forward(self, x):
        # x is presumably (batch, IMAGE_SIZE) — returns raw logits
        return self._linear(x)
class LayerWithLargeParameters(paddle.nn.Layer):
    """Layer whose single Linear weight is deliberately huge (LARGE_PARAM
    outputs) to exercise large-tensor save/load paths."""

    def __init__(self):
        super(LayerWithLargeParameters, self).__init__()
        self._l = paddle.nn.Linear(10, LARGE_PARAM)

    def forward(self, x):
        y = self._l(x)
        return y
def train(layer, loader, loss_fn, opt):
    """Run a standard training loop for EPOCH_NUM epochs.

    Args:
        layer: callable model, applied to each image batch.
        loader: zero-arg callable returning an iterable of (image, label).
        loss_fn: callable (out, label) -> loss with a .backward() method.
        opt: optimizer providing step() and clear_grad().
    """
    # The original enumerated epochs/batches but never used the indices;
    # iterate directly instead.
    for _ in range(EPOCH_NUM):
        for image, label in loader():
            out = layer(image)
            loss = loss_fn(out, label)
            loss.backward()
            opt.step()
            opt.clear_grad()
class TestSaveLoadLargeParameters(unittest.TestCase):
    """Round-trips a state dict with a very large tensor through
    paddle.save/paddle.load using a high pickle protocol."""

    def setUp(self):
        pass

    def test_large_parameters_paddle_save(self):
        # enable dygraph mode
        paddle.disable_static()
        # create network
        layer = LayerWithLargeParameters()
        save_dict = layer.state_dict()
        path = os.path.join("test_paddle_save_load_large_param_save",
                            "layer.pdparams")
        # protocol 4 handles large objects; Python 2 tops out at protocol 2
        if six.PY2:
            protocol = 2
        else:
            protocol = 4
        paddle.save(save_dict, path, protocol=protocol)
        dict_load = paddle.load(path)
        # compare results before and after saving
        for key, value in save_dict.items():
            self.assertTrue(
                np.array_equal(dict_load[key].numpy(), value.numpy()))
class TestSaveLoadPickle(unittest.TestCase):
    """Checks paddle.save's pickle-protocol argument validation and that
    every accepted protocol round-trips a state dict unchanged."""

    def test_pickle_protocol(self):
        # enable dygraph mode
        paddle.disable_static()
        # create network
        layer = LinearNet()
        save_dict = layer.state_dict()
        path = os.path.join("test_paddle_save_load_pickle_protocol",
                            "layer.pdparams")
        # non-int and out-of-range [2, 4] protocols are rejected
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 2.0)
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 1)
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 5)
        protocols = [2, ]
        # NOTE(review): fragile version test — would misbehave on a
        # hypothetical Python X.<4; sys.version_info >= (3, 4) is safer.
        if sys.version_info.major >= 3 and sys.version_info.minor >= 4:
            protocols += [3, 4]
        for protocol in protocols:
            paddle.save(save_dict, path, pickle_protocol=protocol)
            dict_load = paddle.load(path)
            # compare results before and after saving
            for key, value in save_dict.items():
                self.assertTrue(
                    np.array_equal(dict_load[key].numpy(), value.numpy()))
class TestSaveLoadAny(unittest.TestCase):
    def set_zero(self, prog, place, scope=None):
        """Zero every parameter/persistable tensor of *prog* in *scope* (the
        global scope by default) and assert the write took effect, so later
        loads provably restore the original values."""
        if scope is None:
            scope = fluid.global_scope()
        for var in prog.list_vars():
            if isinstance(var, framework.Parameter) or var.persistable:
                ten = scope.find_var(var.name).get_tensor()
                if ten is not None:
                    ten.set(np.zeros_like(np.array(ten)), place)
                    # re-read to confirm the tensor really is all zeros now
                    new_t = np.array(scope.find_var(var.name).get_tensor())
                    self.assertTrue(np.sum(np.abs(new_t)) == 0)
def replace_static_save(self, program, model_path, pickle_protocol=2):
with self.assertRaises(TypeError):
program.state_dict(1)
with self.assertRaises(TypeError):
program.state_dict(scope=1)
with self.assertRaises(ValueError):
program.state_dict('x')
state_dict_param = program.state_dict('param')
paddle.save(state_dict_param, model_path + '.pdparams')
state_dict_opt = program.state_dict('opt')
paddle.save(state_dict_opt, model_path + '.pdopt')
state_dict_all = program.state_dict()
paddle.save(state_dict_opt, model_path + '.pdall')
    def replace_static_load(self, program, model_path):
        """Restore a static Program from the '.pdparams'/'.pdopt' files via
        paddle.load + Program.set_state_dict, including tolerance of bogus
        entries (presumably these only warn — verify against set_state_dict)."""
        with self.assertRaises(TypeError):
            program.set_state_dict(1)
        state_dict_param = paddle.load(model_path + '.pdparams')
        # an unknown variable name should be tolerated
        state_dict_param['fake_var_name.@@'] = np.random.randn(1, 2)
        # a non-tensor value for a real variable should be tolerated too
        state_dict_param['static_x'] = 'UserWarning'
        program.set_state_dict(state_dict_param)
        state_dict_param['static_x'] = np.random.randn(1, 2)
        program.set_state_dict(state_dict_param)
        # setting the same dict twice must be safe (idempotent load)
        program.set_state_dict(state_dict_param)
        state_dict_opt = paddle.load(model_path + '.pdopt')
        program.set_state_dict(state_dict_opt)
    def test_replace_static_save_load(self):
        """End-to-end static-graph test: cross-checks paddle.save/load against
        the legacy fluid.io save/load, and round-trips individual persistable
        tensors through Variable.get_value/set_value."""
        paddle.enable_static()
        with new_program_scope():
            # build a tiny 2-layer FC network and run one step so that
            # parameter and optimizer state actually exist
            x = paddle.static.data(
                name="static_x", shape=[None, IMAGE_SIZE], dtype='float32')
            z = paddle.static.nn.fc(x, 10)
            z = paddle.static.nn.fc(z, 10, bias_attr=False)
            loss = fluid.layers.reduce_mean(z)
            opt = Adam(learning_rate=1e-3)
            opt.minimize(loss)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            prog = paddle.static.default_main_program()
            fake_inputs = np.random.randn(2, IMAGE_SIZE).astype('float32')
            exe.run(prog, feed={'static_x': fake_inputs}, fetch_list=[loss])
            # snapshot every persistable tensor as the ground truth
            base_map = {}
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    t = np.array(fluid.global_scope().find_var(var.name)
                                 .get_tensor())
                    base_map[var.name] = t
            path = os.path.join("test_replace_static_save_load", "model")
            # paddle.save, legacy paddle.fluid.load
            self.replace_static_save(prog, path)
            self.set_zero(prog, place)
            paddle.fluid.io.load(prog, path)
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, np.array(base_t)))
            # legacy paddle.fluid.save, paddle.load
            paddle.fluid.io.save(prog, path)
            self.set_zero(prog, place)
            self.replace_static_load(prog, path)
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, base_t))
            # test for return tensor
            path_vars = 'test_replace_save_load_return_tensor_static/model'
            for var in prog.list_vars():
                if var.persistable:
                    tensor = var.get_value(fluid.global_scope())
                    paddle.save(tensor, os.path.join(path_vars, var.name))
            # argument validation for get_value/set_value
            with self.assertRaises(TypeError):
                var.get_value('fluid.global_scope()')
            with self.assertRaises(ValueError):
                x.get_value()
            with self.assertRaises(TypeError):
                x.set_value('1')
            fake_data = np.zeros([3, 2, 1, 2, 3])
            with self.assertRaises(TypeError):
                x.set_value(fake_data, '1')
            with self.assertRaises(ValueError):
                x.set_value(fake_data)
            with self.assertRaises(ValueError):
                var.set_value(fake_data)
            # set var to zero
            self.set_zero(prog, place)
            for var in prog.list_vars():
                if var.persistable:
                    tensor = paddle.load(
                        os.path.join(path_vars, var.name), return_numpy=False)
                    var.set_value(tensor)
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, base_t))
    def test_paddle_save_load_v2(self):
        """Cross-checks the v2 paddle.save/paddle.load format against the
        legacy _legacy_save/_legacy_load on an optimizer state dict that
        includes an LRScheduler sub-dict."""
        paddle.disable_static()

        class StepDecay(LRScheduler):
            # minimal custom scheduler so the state dict contains a nested
            # scheduler dict alongside tensors
            def __init__(self,
                         learning_rate,
                         step_size,
                         gamma=0.1,
                         last_epoch=-1,
                         verbose=False):
                self.step_size = step_size
                self.gamma = gamma
                super(StepDecay, self).__init__(learning_rate, last_epoch,
                                                verbose)

            def get_lr(self):
                i = self.last_epoch // self.step_size
                return self.base_lr * (self.gamma**i)

        layer = LinearNet()
        inps = paddle.randn([2, IMAGE_SIZE])
        adam = opt.Adam(
            learning_rate=StepDecay(0.1, 1), parameters=layer.parameters())
        # one optimizer step so momentum/beta accumulators exist
        y = layer(inps)
        y.mean().backward()
        adam.step()
        state_dict = adam.state_dict()
        path = 'paddle_save_load_v2/model.pdparams'
        with self.assertRaises(TypeError):
            paddle.save(state_dict, path, use_binary_format='False')
        # legacy paddle.save, paddle.load
        paddle.framework.io._legacy_save(state_dict, path)
        load_dict_tensor = paddle.load(path, return_numpy=False)
        # legacy paddle.load, paddle.save
        paddle.save(state_dict, path)
        load_dict_np = paddle.framework.io._legacy_load(path)
        for k, v in state_dict.items():
            if isinstance(v, dict):
                # nested scheduler state compares by plain dict equality
                self.assertTrue(v == load_dict_tensor[k])
            else:
                self.assertTrue(
                    np.array_equal(v.numpy(), load_dict_tensor[k].numpy()))
                if not np.array_equal(v.numpy(), load_dict_np[k]):
                    # dump both sides before failing to ease debugging
                    print(v.numpy())
                    print(load_dict_np[k])
                self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k]))
    def test_single_pickle_var_dygraph(self):
        """Save a single dygraph tensor, then reload it in both dygraph and
        static mode, as a tensor and as a numpy array."""
        # enable dygraph mode
        paddle.disable_static()
        layer = LinearNet()
        path = 'paddle_save_load_v2/var_dygraph'
        tensor = layer._linear.weight
        # Invalid pickle protocols are rejected: wrong type and out of range.
        with self.assertRaises(ValueError):
            paddle.save(tensor, path, pickle_protocol='3')
        with self.assertRaises(ValueError):
            paddle.save(tensor, path, pickle_protocol=5)
        paddle.save(tensor, path)
        t_dygraph = paddle.load(path)
        np_dygraph = paddle.load(path, return_numpy=True)
        # In dygraph mode the loaded object is a VarBase.
        self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
        self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
        self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
        # In static mode the same file loads as a LoDTensor.
        paddle.enable_static()
        lod_static = paddle.load(path)
        np_static = paddle.load(path, return_numpy=True)
        self.assertTrue(isinstance(lod_static, paddle.fluid.core.LoDTensor))
        self.assertTrue(np.array_equal(tensor.numpy(), np_static))
        self.assertTrue(np.array_equal(tensor.numpy(), np.array(lod_static)))
    def test_single_pickle_var_static(self):
        """Save a single static-graph parameter value and reload it in both
        static mode (setting it back via set_value) and dygraph mode."""
        # enable static mode
        paddle.enable_static()
        with new_program_scope():
            # create network
            x = paddle.static.data(
                name="x", shape=[None, IMAGE_SIZE], dtype='float32')
            z = paddle.static.nn.fc(x, 128)
            loss = fluid.layers.reduce_mean(z)
            place = fluid.CPUPlace(
            ) if not paddle.fluid.core.is_compiled_with_cuda(
            ) else fluid.CUDAPlace(0)
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            prog = paddle.static.default_main_program()
            # Grab the fc weight (the only [IMAGE_SIZE, 128] variable);
            # `var` stays bound to it after the break for the set_value calls.
            for var in prog.list_vars():
                if list(var.shape) == [IMAGE_SIZE, 128]:
                    tensor = var.get_value()
                    break
            scope = fluid.global_scope()
            origin_tensor = np.array(tensor)
            path = 'test_single_pickle_var_static/var'
            paddle.save(tensor, path)
            self.set_zero(prog, place, scope)
            # static load
            lod_static = paddle.load(path)
            np_static = paddle.load(path, return_numpy=True)
            # set_tensor(np.ndarray)
            var.set_value(np_static, scope)
            self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
            # set_tensor(LoDTensor)
            self.set_zero(prog, place, scope)
            var.set_value(lod_static, scope)
            self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
            # enable dygraph mode
            paddle.disable_static()
            var_dygraph = paddle.load(path)
            np_dygraph = paddle.load(path, return_numpy=True)
            self.assertTrue(np.array_equal(np.array(tensor), np_dygraph))
            self.assertTrue(np.array_equal(np.array(tensor), var_dygraph.numpy()))
    def test_dygraph_save_static_load(self):
        """Save a state dict in dygraph mode and load it into a static-graph
        program via Program.set_state_dict, then compare parameter values."""
        inps = np.random.randn(1, IMAGE_SIZE).astype('float32')
        path = 'test_dygraph_save_static_load/dy-static.pdparams'
        paddle.disable_static()
        with paddle.utils.unique_name.guard():
            layer = LinearNet()
            state_dict_dy = layer.state_dict()
            paddle.save(state_dict_dy, path)
        paddle.enable_static()
        with new_program_scope():
            layer = LinearNet()
            data = paddle.static.data(
                name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32')
            y_static = layer(data)
            program = paddle.static.default_main_program()
            # NOTE(review): `place` is computed but the Executor below is
            # built with paddle.CPUPlace() — confirm whether CUDA execution
            # was intended here.
            place = fluid.CPUPlace(
            ) if not paddle.fluid.core.is_compiled_with_cuda(
            ) else fluid.CUDAPlace(0)
            exe = paddle.static.Executor(paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            # keep_name_table preserves the dygraph->static name mapping.
            state_dict = paddle.load(path, keep_name_table=True)
            program.set_state_dict(state_dict)
            state_dict_param = program.state_dict("param")
            for name, tensor in state_dict_dy.items():
                self.assertTrue(
                    np.array_equal(tensor.numpy(),
                                   np.array(state_dict_param[tensor.name])))
def test_save_load_complex_object_dygraph_save(self):
paddle.disable_static()
layer = paddle.nn.Linear(3, 4)
state_dict = layer.state_dict()
obj1 = [
paddle.randn(
[3, 4], dtype='float32'), np.random.randn(5, 6),
('fake_weight', np.ones(
[7, 8], dtype='float32'))
]
obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123}
obj3 = (paddle.randn(
[5, 4], dtype='float32'), np.random.randn(3, 4).astype("float32"), {
"state_dict": state_dict,
"opt": state_dict
})
obj4 = (np.random.randn(5, 6), (123, ))
path1 = "test_save_load_any_complex_object_dygraph/obj1"
path2 = "test_save_load_any_complex_object_dygraph/obj2"
path3 = "test_save_load_any_complex_object_dygraph/obj3"
path4 = "test_save_load_any_complex_object_dygraph/obj4"
paddle.save(obj1, path1)
paddle.save(obj2, path2)
paddle.save(obj3, path3)
paddle.save(obj4, path4)
load_tensor1 = paddle.load(path1, return_numpy=False)
load_tensor2 = paddle.load(path2, return_numpy=False)
load_tensor3 = paddle.load(path3, return_numpy=False)
load_tensor4 = paddle.load(path4, return_numpy=False)
self.assertTrue(
np.array_equal(load_tensor1[0].numpy(), obj1[0].numpy()))
self.assertTrue(np.array_equal(load_tensor1[1], obj1[1]))
self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1]))
for i in range(len(load_tensor1)):
self.assertTrue(
type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(v.numpy(), load_tensor2['k2'][k].numpy()))
self.assertTrue(load_tensor2['epoch'] == 123)
self.assertTrue(
np.array_equal(load_tensor3[0].numpy(), obj3[0].numpy()))
self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
v.numpy()))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_tensor3[2]["opt"][k].numpy(), v.numpy()))
self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
load_array1 = paddle.load(path1, return_numpy=True)
load_array2 = paddle.load(path2, return_numpy=True)
load_array3 = paddle.load(path3, return_numpy=True)
load_array4 = paddle.load(path4, return_numpy=True)
self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy()))
self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
for i in range(len(load_array1)):
self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k]))
self.assertTrue(load_array2['epoch'] == 123)
self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy()))
self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["state_dict"][k], v.numpy()))
for k, v in state_dict.items():
self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy()))
self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
# static mode
paddle.enable_static()
load_tensor1 = paddle.load(path1, return_numpy=False)
load_tensor2 = paddle.load(path2, return_numpy=False)
load_tensor3 = paddle.load(path3, return_numpy=False)
load_tensor4 = paddle.load(path4, return_numpy=False)
self.assertTrue(
np.array_equal(np.array(load_tensor1[0]), obj1[0].numpy()))
self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
self.assertTrue(np.array_equal(np.array(load_tensor1[2]), obj1[2][1]))
for i in range(len(load_tensor1)):
self.assertTrue(
type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(v.numpy(), np.array(load_tensor2['k2'][k])))
self.assertTrue(load_tensor2['epoch'] == 123)
self.assertTrue(
isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor))
self.assertTrue(
np.array_equal(np.array(load_tensor3[0]), obj3[0].numpy()))
self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["state_dict"][k],
paddle.fluid.core.LoDTensor))
self.assertTrue(
np.array_equal(
np.array(load_tensor3[2]["state_dict"][k]), v.numpy()))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["opt"][k],
paddle.fluid.core.LoDTensor))
self.assertTrue(
np.array_equal(np.array(load_tensor3[2]["opt"][k]), v.numpy()))
self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor)
self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0]))
load_array1 = paddle.load(path1, return_numpy=True)
load_array2 = paddle.load(path2, return_numpy=True)
load_array3 = paddle.load(path3, return_numpy=True)
load_array4 = paddle.load(path4, return_numpy=True)
self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy()))
self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
for i in range(len(load_array1)):
self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k]))
self.assertTrue(load_array2['epoch'] == 123)
self.assertTrue(isinstance(load_array3[0], np.ndarray))
self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy()))
self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["state_dict"][k], v.numpy()))
for k, v in state_dict.items():
self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy()))
self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
def test_save_load_complex_object_static_save(self):
paddle.enable_static()
with new_program_scope():
# create network
x = paddle.static.data(
name="x", shape=[None, IMAGE_SIZE], dtype='float32')
z = paddle.static.nn.fc(x, 10, bias_attr=False)
z = paddle.static.nn.fc(z, 128, bias_attr=False)
loss = fluid.layers.reduce_mean(z)
place = fluid.CPUPlace(
) if not paddle.fluid.core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
prog = paddle.static.default_main_program()
exe = paddle.static.Executor(place)
exe.run(paddle.static.default_startup_program())
state_dict = prog.state_dict()
keys = list(state_dict.keys())
obj1 = [
state_dict[keys[0]], np.random.randn(5, 6),
('fake_weight', np.ones(
[7, 8], dtype='float32'))
]
obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123}
obj3 = (state_dict[keys[0]], np.ndarray(
[3, 4], dtype="float32"), {
"state_dict": state_dict,
"opt": state_dict
})
obj4 = (np.ndarray([3, 4], dtype="float32"), )
path1 = "test_save_load_any_complex_object_static/obj1"
path2 = "test_save_load_any_complex_object_static/obj2"
path3 = "test_save_load_any_complex_object_static/obj3"
path4 = "test_save_load_any_complex_object_static/obj4"
paddle.save(obj1, path1)
paddle.save(obj2, path2)
paddle.save(obj3, path3)
paddle.save(obj4, path4)
load_tensor1 = paddle.load(path1, return_numpy=False)
load_tensor2 = paddle.load(path2, return_numpy=False)
load_tensor3 = paddle.load(path3, return_numpy=False)
load_tensor4 = paddle.load(path4, return_numpy=False)
self.assertTrue(
np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0])))
self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
self.assertTrue(
np.array_equal(np.array(load_tensor1[2]), obj1[2][1]))
for i in range(len(load_tensor1)):
self.assertTrue(
type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(
np.array(v), np.array(load_tensor2['k2'][k])))
self.assertTrue(load_tensor2['epoch'] == 123)
self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor))
self.assertTrue(np.array_equal(np.array(load_tensor3[0]), obj3[0]))
self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor))
self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["state_dict"][k],
fluid.core.LoDTensor))
self.assertTrue(
np.array_equal(
np.array(load_tensor3[2]["state_dict"][k]), np.array(
v)))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor))
self.assertTrue(
np.array_equal(
np.array(load_tensor3[2]["opt"][k]), np.array(v)))
self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor))
self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0]))
load_array1 = paddle.load(path1, return_numpy=True)
load_array2 = paddle.load(path2, return_numpy=True)
load_array3 = paddle.load(path3, return_numpy=True)
load_array4 = paddle.load(path4, return_numpy=True)
self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0])))
self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
for i in range(len(load_array1)):
self.assertTrue(
type(load_array1[i]) == type(load_array2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(np.array(v), load_array2['k2'][k]))
self.assertTrue(load_array2['epoch'] == 123)
self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0])))
self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["state_dict"][k], np.array(
v)))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["opt"][k], np.array(v)))
self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
# dygraph mode
paddle.disable_static()
load_tensor1 = paddle.load(path1, return_numpy=False)
load_tensor2 = paddle.load(path2, return_numpy=False)
load_tensor3 = paddle.load(path3, return_numpy=False)
load_tensor4 = paddle.load(path4, return_numpy=False)
self.assertTrue(
np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0])))
self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1]))
for i in range(len(load_tensor1)):
self.assertTrue(
type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(
np.array(v), np.array(load_tensor2['k2'][k])))
self.assertTrue(load_tensor2['epoch'] == 123)
self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["state_dict"][k],
fluid.core.VarBase))
self.assertTrue(
np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
np.array(v)))
for k, v in state_dict.items():
self.assertTrue(
isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
self.assertTrue(
np.array_equal(load_tensor3[2]["opt"][k].numpy(),
np.array(v)))
self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
load_array1 = paddle.load(path1, return_numpy=True)
load_array2 = paddle.load(path2, return_numpy=True)
load_array3 = paddle.load(path3, return_numpy=True)
load_array4 = paddle.load(path4, return_numpy=True)
self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0])))
self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
for i in range(len(load_array1)):
self.assertTrue(
type(load_array1[i]) == type(load_array2['k1'][i]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(np.array(v), load_array2['k2'][k]))
self.assertTrue(load_array2['epoch'] == 123)
self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0])))
self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["state_dict"][k], np.array(
v)))
for k, v in state_dict.items():
self.assertTrue(
np.array_equal(load_array3[2]["opt"][k], np.array(v)))
self.assertTrue(isinstance(load_array4[0], np.ndarray))
self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
    def test_varbase_binary_var(self):
        """Save a dygraph VarBase in binary (non-pickle) format and reload
        it both as a tensor and as a numpy array."""
        paddle.disable_static()
        varbase = paddle.randn([3, 2], dtype='float32')
        path = 'test_paddle_save_load_varbase_binary_var/varbase'
        paddle.save(varbase, path, use_binary_format=True)
        load_array = paddle.load(path, return_numpy=True)
        load_tensor = paddle.load(path, return_numpy=False)
        origin_array = varbase.numpy()
        load_tensor_array = load_tensor.numpy()
        # Make sure device-side work has finished before comparing on host.
        if paddle.fluid.core.is_compiled_with_cuda():
            fluid.core._cuda_synchronize(paddle.CUDAPlace(0))
        self.assertTrue(np.array_equal(origin_array, load_array))
        self.assertTrue(np.array_equal(origin_array, load_tensor_array))
class TestSaveLoad(unittest.TestCase):
    """End-to-end paddle.save/paddle.load round trip for a trained layer and
    its optimizer, plus error cases for malformed save/load paths."""

    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        # config seed
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)

    def build_and_train_model(self):
        """Build a LinearNet, train it briefly, and return (layer, optimizer)."""
        # create network
        layer = LinearNet()
        loss_fn = nn.CrossEntropyLoss()
        adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
        # create data loader
        # TODO: using new DataLoader cause unknown Timeout on windows, replace it
        loader = random_batch_reader()
        # train
        train(layer, loader, loss_fn, adam)
        return layer, adam

    def check_load_state_dict(self, orig_dict, load_dict):
        """Assert every entry of *orig_dict* matches *load_dict* numerically.

        Loaded values may be Tensors (with .numpy) or LoDTensors/ndarrays,
        so both sides are normalised to numpy before comparing.
        """
        for var_name, value in orig_dict.items():
            load_value = load_dict[var_name].numpy() if hasattr(
                load_dict[var_name], 'numpy') else np.array(load_dict[var_name])
            self.assertTrue(np.array_equal(value.numpy(), load_value))

    def test_save_load(self):
        # NOTE: the local name `opt` shadows the module-level
        # `paddle.optimizer as opt` import for the rest of this method.
        layer, opt = self.build_and_train_model()
        # save
        layer_save_path = "test_paddle_save_load.linear.pdparams"
        opt_save_path = "test_paddle_save_load.linear.pdopt"
        layer_state_dict = layer.state_dict()
        opt_state_dict = opt.state_dict()
        paddle.save(layer_state_dict, layer_save_path)
        paddle.save(opt_state_dict, opt_save_path)
        # load
        load_layer_state_dict = paddle.load(layer_save_path)
        load_opt_state_dict = paddle.load(opt_save_path)
        self.check_load_state_dict(layer_state_dict, load_layer_state_dict)
        self.check_load_state_dict(opt_state_dict, load_opt_state_dict)
        # test save load in static mode
        paddle.enable_static()
        static_save_path = "static_mode_test/test_paddle_save_load.linear.pdparams"
        paddle.save(layer_state_dict, static_save_path)
        load_static_state_dict = paddle.load(static_save_path)
        self.check_load_state_dict(layer_state_dict, load_static_state_dict)
        # error test cases, some tests relay base test above
        # 1. test save obj not dict error
        # NOTE(review): `test_list` is never used — the "save a non-dict"
        # error case appears to be missing its assertion; confirm intent.
        test_list = [1, 2, 3]
        # 2. test save path format error
        with self.assertRaises(ValueError):
            paddle.save(layer_state_dict, "test_paddle_save_load.linear.model/")
        # 3. test load path not exist error
        with self.assertRaises(ValueError):
            paddle.load("test_paddle_save_load.linear.params")
        # 4. test load old save path error
        with self.assertRaises(ValueError):
            paddle.load("test_paddle_save_load.linear")
class TestSaveLoadProgram(unittest.TestCase):
    """Serialize whole static Program objects with paddle.save/paddle.load."""

    def test_save_load_program(self):
        paddle.enable_static()
        path1 = "test_paddle_save_load_program/main_program.pdmodel"
        path2 = "test_paddle_save_load_program/startup_program.pdmodel"
        with new_program_scope():
            net = LinearNet()
            inp = paddle.static.data(
                name='x_static_save', shape=(None, IMAGE_SIZE),
                dtype='float32')
            net(inp)
            main_program = paddle.static.default_main_program()
            startup_program = paddle.static.default_startup_program()
            # Snapshot both program descs before saving them to disk.
            expected_main = main_program.desc.serialize_to_string()
            expected_startup = startup_program.desc.serialize_to_string()
            paddle.save(main_program, path1)
            paddle.save(startup_program, path2)
        with new_program_scope():
            # Reload in a fresh scope and compare the serialized descs.
            restored_main = paddle.load(path1).desc.serialize_to_string()
            restored_startup = paddle.load(path2).desc.serialize_to_string()
            self.assertTrue(expected_main == restored_main)
            self.assertTrue(expected_startup == restored_startup)
class TestSaveLoadLayer(unittest.TestCase):
    """paddle.save/paddle.load applied to Layer objects themselves."""

    def test_save_load_layer(self):
        # Layer pickling is only exercised on Python 3.
        if six.PY2:
            return
        paddle.disable_static()
        sample = paddle.randn([1, IMAGE_SIZE], dtype='float32')
        net_a = LinearNet()
        net_b = LinearNet()
        net_a.eval()
        net_b.eval()
        origin = (net_a(sample), net_b(sample))
        path = "test_save_load_layer_/layer.pdmodel"
        paddle.save((net_a, net_b), path)
        # static: loading saved Layer objects must be rejected
        paddle.enable_static()
        with self.assertRaises(ValueError):
            paddle.load(path)
        # dygraph: reload and compare outputs against the originals
        paddle.disable_static()
        restored = paddle.load(path)
        outputs = [net(sample) for net in restored]
        for expected, actual in zip(origin, outputs):
            self.assertTrue((expected - actual).abs().max() < 1e-10)
if __name__ == '__main__':
    # Fix: the original line had a file-path extraction artifact fused onto
    # it ("... | python/paddle/... |"), which is a syntax error.
    unittest.main()
from __future__ import print_function
import unittest
import numpy as np
import os
import sys
import six
import paddle
import paddle.nn as nn
import paddle.optimizer as opt
import paddle.fluid as fluid
from paddle.fluid.optimizer import Adam
import paddle.fluid.framework as framework
from test_imperative_base import new_program_scope
from paddle.optimizer.lr import LRScheduler
# Data / training dimensions shared by all tests in this file.
BATCH_SIZE = 16
BATCH_NUM = 4
EPOCH_NUM = 4
SEED = 10
IMAGE_SIZE = 784
CLASS_NUM = 10
# Output width of LayerWithLargeParameters._l.  On Python 2 only pickle
# protocol 2 is usable, which cannot stream multi-GB payloads, so the
# "large" parameter is kept tiny there — presumably just to keep the
# large-param save test runnable; confirm against that test.
if six.PY2:
    LARGE_PARAM = 2**2
else:
    LARGE_PARAM = 2**26
def random_batch_reader():
    """Return a reader() yielding BATCH_NUM deterministic random batches.

    Images are float32 tensors of shape [BATCH_SIZE, IMAGE_SIZE]; labels are
    int64 class ids in [0, CLASS_NUM) of shape [BATCH_SIZE, 1].
    """

    def _get_random_inputs_and_labels():
        # Re-seeding every call makes all batches identical / reproducible.
        np.random.seed(SEED)
        image = np.random.random([BATCH_SIZE, IMAGE_SIZE]).astype('float32')
        # Fix: np.random.randint's upper bound is exclusive, so the previous
        # `CLASS_NUM - 1` could never generate the last class id.
        label = np.random.randint(0, CLASS_NUM, (
            BATCH_SIZE,
            1, )).astype('int64')
        return image, label

    def __reader__():
        for _ in range(BATCH_NUM):
            batch_image, batch_label = _get_random_inputs_and_labels()
            batch_image = paddle.to_tensor(batch_image)
            batch_label = paddle.to_tensor(batch_label)
            yield batch_image, batch_label

    return __reader__
class LinearNet(nn.Layer):
    """A single fully-connected layer mapping images to class logits."""

    def __init__(self):
        super(LinearNet, self).__init__()
        # Tests reach into `_linear` directly (e.g. `layer._linear.weight`),
        # so the attribute name is part of this class's de-facto interface.
        self._linear = nn.Linear(IMAGE_SIZE, CLASS_NUM)

    def forward(self, x):
        """Return the logits for the input batch *x*."""
        logits = self._linear(x)
        return logits
class LayerWithLargeParameters(paddle.nn.Layer):
    """Layer whose single Linear holds a very large weight matrix
    (10 x LARGE_PARAM), used to exercise large-object serialization."""

    def __init__(self):
        super(LayerWithLargeParameters, self).__init__()
        self._l = paddle.nn.Linear(10, LARGE_PARAM)

    def forward(self, x):
        """Project *x* through the large linear layer."""
        return self._l(x)
def train(layer, loader, loss_fn, opt):
    """Run EPOCH_NUM epochs of optimisation of *layer* over *loader*'s batches."""
    for _ in range(EPOCH_NUM):
        for image, label in loader():
            prediction = layer(image)
            loss = loss_fn(prediction, label)
            loss.backward()
            opt.step()
            opt.clear_grad()
class TestSaveLoadLargeParameters(unittest.TestCase):
    """paddle.save/paddle.load round trip of a state dict holding a huge
    parameter (see LARGE_PARAM)."""

    def setUp(self):
        pass

    def test_large_parameters_paddle_save(self):
        # dygraph mode
        paddle.disable_static()
        layer = LayerWithLargeParameters()
        saved_state = layer.state_dict()
        path = os.path.join("test_paddle_save_load_large_param_save",
                            "layer.pdparams")
        # Protocol 4 (Python 3.4+) supports objects larger than 4 GiB;
        # fall back to protocol 2 on Python 2.
        protocol = 2 if six.PY2 else 4
        paddle.save(saved_state, path, protocol=protocol)
        loaded_state = paddle.load(path)
        # compare results before and after saving
        for key, value in saved_state.items():
            self.assertTrue(
                np.array_equal(loaded_state[key].numpy(), value.numpy()))
class TestSaveLoadPickle(unittest.TestCase):
    """Validate paddle.save's pickle-protocol argument handling."""

    def test_pickle_protocol(self):
        # enable dygraph mode
        paddle.disable_static()
        # create network
        layer = LinearNet()
        save_dict = layer.state_dict()
        path = os.path.join("test_paddle_save_load_pickle_protocol",
                            "layer.pdparams")
        # Invalid protocols: wrong type, too small, too large.
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 2.0)
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 1)
        with self.assertRaises(ValueError):
            paddle.save(save_dict, path, 5)
        protocols = [2, ]
        # Pickle protocols 3 and 4 exist from Python 3.4 on.  Fix: compare
        # the whole version tuple — the old `major >= 3 and minor >= 4`
        # check is wrong for any major version above 3.
        if sys.version_info >= (3, 4):
            protocols += [3, 4]
        for protocol in protocols:
            paddle.save(save_dict, path, pickle_protocol=protocol)
            dict_load = paddle.load(path)
            # compare results before and after saving
            for key, value in save_dict.items():
                self.assertTrue(
                    np.array_equal(dict_load[key].numpy(), value.numpy()))
class TestSaveLoadAny(unittest.TestCase):
def set_zero(self, prog, place, scope=None):
if scope is None:
scope = fluid.global_scope()
for var in prog.list_vars():
if isinstance(var, framework.Parameter) or var.persistable:
ten = scope.find_var(var.name).get_tensor()
if ten is not None:
ten.set(np.zeros_like(np.array(ten)), place)
new_t = np.array(scope.find_var(var.name).get_tensor())
self.assertTrue(np.sum(np.abs(new_t)) == 0)
def replace_static_save(self, program, model_path, pickle_protocol=2):
with self.assertRaises(TypeError):
program.state_dict(1)
with self.assertRaises(TypeError):
program.state_dict(scope=1)
with self.assertRaises(ValueError):
program.state_dict('x')
state_dict_param = program.state_dict('param')
paddle.save(state_dict_param, model_path + '.pdparams')
state_dict_opt = program.state_dict('opt')
paddle.save(state_dict_opt, model_path + '.pdopt')
state_dict_all = program.state_dict()
paddle.save(state_dict_opt, model_path + '.pdall')
    def replace_static_load(self, program, model_path):
        """Load state dicts saved by replace_static_save back into *program*,
        also exercising set_state_dict's error and skip paths."""
        # a non-dict argument is rejected outright
        with self.assertRaises(TypeError):
            program.set_state_dict(1)
        state_dict_param = paddle.load(model_path + '.pdparams')
        # a key matching no program variable — presumably warned about and
        # skipped by set_state_dict; the test only checks nothing raises
        state_dict_param['fake_var_name.@@'] = np.random.randn(1, 2)
        # a plain-string value that cannot be assigned to a variable
        state_dict_param['static_x'] = 'UserWarning'
        program.set_state_dict(state_dict_param)
        state_dict_param['static_x'] = np.random.randn(1, 2)
        program.set_state_dict(state_dict_param)
        # setting the same state twice must be harmless (idempotent here)
        program.set_state_dict(state_dict_param)
        state_dict_opt = paddle.load(model_path + '.pdopt')
        program.set_state_dict(state_dict_opt)
    def test_replace_static_save_load(self):
        """Cross-check the new paddle.save/load state-dict path against the
        legacy fluid.io.save/load path, then round-trip individual variables
        via get_value/set_value."""
        paddle.enable_static()
        with new_program_scope():
            x = paddle.static.data(
                name="static_x", shape=[None, IMAGE_SIZE], dtype='float32')
            z = paddle.static.nn.fc(x, 10)
            z = paddle.static.nn.fc(z, 10, bias_attr=False)
            loss = fluid.layers.reduce_mean(z)
            opt = Adam(learning_rate=1e-3)
            opt.minimize(loss)
            place = paddle.CPUPlace()
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            prog = paddle.static.default_main_program()
            # One training step so optimizer state exists.
            fake_inputs = np.random.randn(2, IMAGE_SIZE).astype('float32')
            exe.run(prog, feed={'static_x': fake_inputs}, fetch_list=[loss])
            # Snapshot every parameter/persistable value as the baseline.
            base_map = {}
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    t = np.array(fluid.global_scope().find_var(var.name)
                                 .get_tensor())
                    base_map[var.name] = t
            path = os.path.join("test_replace_static_save_load", "model")
            # paddle.save, legacy paddle.fluid.load
            self.replace_static_save(prog, path)
            self.set_zero(prog, place)
            paddle.fluid.io.load(prog, path)
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, np.array(base_t)))
            # legacy paddle.fluid.save, paddle.load
            paddle.fluid.io.save(prog, path)
            self.set_zero(prog, place)
            self.replace_static_load(prog, path)
            for var in prog.list_vars():
                if isinstance(var, framework.Parameter) or var.persistable:
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, base_t))
            # test for return tensor
            path_vars = 'test_replace_save_load_return_tensor_static/model'
            for var in prog.list_vars():
                if var.persistable:
                    tensor = var.get_value(fluid.global_scope())
                    paddle.save(tensor, os.path.join(path_vars, var.name))
            # get_value/set_value argument validation (`var` is the last
            # persistable variable from the loop above; `x` is the feed var,
            # which has no value in scope).
            with self.assertRaises(TypeError):
                var.get_value('fluid.global_scope()')
            with self.assertRaises(ValueError):
                x.get_value()
            with self.assertRaises(TypeError):
                x.set_value('1')
            fake_data = np.zeros([3, 2, 1, 2, 3])
            with self.assertRaises(TypeError):
                x.set_value(fake_data, '1')
            with self.assertRaises(ValueError):
                x.set_value(fake_data)
            with self.assertRaises(ValueError):
                var.set_value(fake_data)
            # set var to zero
            self.set_zero(prog, place)
            for var in prog.list_vars():
                if var.persistable:
                    tensor = paddle.load(
                        os.path.join(path_vars, var.name), return_numpy=False)
                    var.set_value(tensor)
                    new_t = np.array(fluid.global_scope().find_var(var.name)
                                     .get_tensor())
                    base_t = base_map[var.name]
                    self.assertTrue(np.array_equal(new_t, base_t))
    def test_paddle_save_load_v2(self):
        """Round-trip an Adam optimizer state dict (tensors plus a nested
        LR-scheduler dict) through both the legacy and the v2 save paths,
        checking each saver's output is readable by the other loader."""
        paddle.disable_static()

        class StepDecay(LRScheduler):
            # Minimal scheduler: its state adds a nested plain dict to the
            # optimizer state dict, exercising non-tensor entries below.
            def __init__(self,
                         learning_rate,
                         step_size,
                         gamma=0.1,
                         last_epoch=-1,
                         verbose=False):
                self.step_size = step_size
                self.gamma = gamma
                super(StepDecay, self).__init__(learning_rate, last_epoch,
                                                verbose)

            def get_lr(self):
                # lr = base_lr * gamma ** (epoch // step_size)
                i = self.last_epoch // self.step_size
                return self.base_lr * (self.gamma**i)

        # Run one optimizer step so the Adam state dict is populated.
        layer = LinearNet()
        inps = paddle.randn([2, IMAGE_SIZE])
        adam = opt.Adam(
            learning_rate=StepDecay(0.1, 1), parameters=layer.parameters())
        y = layer(inps)
        y.mean().backward()
        adam.step()
        state_dict = adam.state_dict()
        path = 'paddle_save_load_v2/model.pdparams'
        # use_binary_format must be a real bool, not a string.
        with self.assertRaises(TypeError):
            paddle.save(state_dict, path, use_binary_format='False')
        # legacy paddle.save, paddle.load
        paddle.framework.io._legacy_save(state_dict, path)
        load_dict_tensor = paddle.load(path, return_numpy=False)
        # legacy paddle.load, paddle.save
        paddle.save(state_dict, path)
        load_dict_np = paddle.framework.io._legacy_load(path)
        for k, v in state_dict.items():
            if isinstance(v, dict):
                # Nested scheduler state: plain dict, compare directly.
                self.assertTrue(v == load_dict_tensor[k])
            else:
                self.assertTrue(
                    np.array_equal(v.numpy(), load_dict_tensor[k].numpy()))
                # Debug output on mismatch — the assert below gives no detail.
                if not np.array_equal(v.numpy(), load_dict_np[k]):
                    print(v.numpy())
                    print(load_dict_np[k])
                self.assertTrue(np.array_equal(v.numpy(), load_dict_np[k]))
    def test_single_pickle_var_dygraph(self):
        """Save a single dygraph tensor, then reload it in both dygraph and
        static mode, as a tensor and as a numpy array."""
        # enable dygraph mode
        paddle.disable_static()
        layer = LinearNet()
        path = 'paddle_save_load_v2/var_dygraph'
        tensor = layer._linear.weight
        # Invalid pickle protocols are rejected: wrong type and out of range.
        with self.assertRaises(ValueError):
            paddle.save(tensor, path, pickle_protocol='3')
        with self.assertRaises(ValueError):
            paddle.save(tensor, path, pickle_protocol=5)
        paddle.save(tensor, path)
        t_dygraph = paddle.load(path)
        np_dygraph = paddle.load(path, return_numpy=True)
        # In dygraph mode the loaded object is a VarBase.
        self.assertTrue(isinstance(t_dygraph, paddle.fluid.core.VarBase))
        self.assertTrue(np.array_equal(tensor.numpy(), np_dygraph))
        self.assertTrue(np.array_equal(tensor.numpy(), t_dygraph.numpy()))
        # In static mode the same file loads as a LoDTensor.
        paddle.enable_static()
        lod_static = paddle.load(path)
        np_static = paddle.load(path, return_numpy=True)
        self.assertTrue(isinstance(lod_static, paddle.fluid.core.LoDTensor))
        self.assertTrue(np.array_equal(tensor.numpy(), np_static))
        self.assertTrue(np.array_equal(tensor.numpy(), np.array(lod_static)))
    def test_single_pickle_var_static(self):
        """Save a single static-graph parameter value and reload it in both
        static mode (setting it back via set_value) and dygraph mode."""
        # enable static mode
        paddle.enable_static()
        with new_program_scope():
            # create network
            x = paddle.static.data(
                name="x", shape=[None, IMAGE_SIZE], dtype='float32')
            z = paddle.static.nn.fc(x, 128)
            loss = fluid.layers.reduce_mean(z)
            place = fluid.CPUPlace(
            ) if not paddle.fluid.core.is_compiled_with_cuda(
            ) else fluid.CUDAPlace(0)
            exe = paddle.static.Executor(place)
            exe.run(paddle.static.default_startup_program())
            prog = paddle.static.default_main_program()
            # Grab the fc weight (the only [IMAGE_SIZE, 128] variable);
            # `var` stays bound to it after the break for the set_value calls.
            for var in prog.list_vars():
                if list(var.shape) == [IMAGE_SIZE, 128]:
                    tensor = var.get_value()
                    break
            scope = fluid.global_scope()
            origin_tensor = np.array(tensor)
            path = 'test_single_pickle_var_static/var'
            paddle.save(tensor, path)
            self.set_zero(prog, place, scope)
            # static load
            lod_static = paddle.load(path)
            np_static = paddle.load(path, return_numpy=True)
            # set_tensor(np.ndarray)
            var.set_value(np_static, scope)
            self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
            # set_tensor(LoDTensor)
            self.set_zero(prog, place, scope)
            var.set_value(lod_static, scope)
            self.assertTrue(np.array_equal(origin_tensor, np.array(tensor)))
            # enable dygraph mode
            paddle.disable_static()
            var_dygraph = paddle.load(path)
            np_dygraph = paddle.load(path, return_numpy=True)
            self.assertTrue(np.array_equal(np.array(tensor), np_dygraph))
            self.assertTrue(np.array_equal(np.array(tensor), var_dygraph.numpy()))
    def test_dygraph_save_static_load(self):
        """Save a state dict in dygraph mode and load it into a static-graph
        program via Program.set_state_dict, then compare parameter values."""
        inps = np.random.randn(1, IMAGE_SIZE).astype('float32')
        path = 'test_dygraph_save_static_load/dy-static.pdparams'
        paddle.disable_static()
        with paddle.utils.unique_name.guard():
            layer = LinearNet()
            state_dict_dy = layer.state_dict()
            paddle.save(state_dict_dy, path)
        paddle.enable_static()
        with new_program_scope():
            layer = LinearNet()
            data = paddle.static.data(
                name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32')
            y_static = layer(data)
            program = paddle.static.default_main_program()
            # NOTE(review): `place` is computed but the Executor below is
            # built with paddle.CPUPlace() — confirm whether CUDA execution
            # was intended here.
            place = fluid.CPUPlace(
            ) if not paddle.fluid.core.is_compiled_with_cuda(
            ) else fluid.CUDAPlace(0)
            exe = paddle.static.Executor(paddle.CPUPlace())
            exe.run(paddle.static.default_startup_program())
            # keep_name_table preserves the dygraph->static name mapping.
            state_dict = paddle.load(path, keep_name_table=True)
            program.set_state_dict(state_dict)
            state_dict_param = program.state_dict("param")
            for name, tensor in state_dict_dy.items():
                self.assertTrue(
                    np.array_equal(tensor.numpy(),
                                   np.array(state_dict_param[tensor.name])))
def test_save_load_complex_object_dygraph_save(self):
    """Save nested containers (list / dict / tuple mixing Tensors, raw
    ndarrays and a layer state_dict) in dygraph mode, then reload them
    in both dygraph and static modes with return_numpy False and True,
    checking values and, where relevant, returned types.
    """
    paddle.disable_static()
    layer = paddle.nn.Linear(3, 4)
    state_dict = layer.state_dict()
    # obj1: list mixing a Tensor, a raw ndarray and a (name, ndarray) tuple.
    obj1 = [
        paddle.randn(
            [3, 4], dtype='float32'), np.random.randn(5, 6),
        ('fake_weight', np.ones(
            [7, 8], dtype='float32'))
    ]
    # obj2: dict nesting obj1, a state_dict and a plain int.
    obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123}
    # obj3: tuple holding a Tensor, an ndarray and a dict of state_dicts.
    obj3 = (paddle.randn(
        [5, 4], dtype='float32'), np.random.randn(3, 4).astype("float32"), {
            "state_dict": state_dict,
            "opt": state_dict
        })
    # obj4: tuple holding an ndarray and a nested plain tuple.
    obj4 = (np.random.randn(5, 6), (123, ))
    path1 = "test_save_load_any_complex_object_dygraph/obj1"
    path2 = "test_save_load_any_complex_object_dygraph/obj2"
    path3 = "test_save_load_any_complex_object_dygraph/obj3"
    path4 = "test_save_load_any_complex_object_dygraph/obj4"
    paddle.save(obj1, path1)
    paddle.save(obj2, path2)
    paddle.save(obj3, path3)
    paddle.save(obj4, path4)
    # --- dygraph reload as tensors (return_numpy=False) ---
    load_tensor1 = paddle.load(path1, return_numpy=False)
    load_tensor2 = paddle.load(path2, return_numpy=False)
    load_tensor3 = paddle.load(path3, return_numpy=False)
    load_tensor4 = paddle.load(path4, return_numpy=False)
    self.assertTrue(
        np.array_equal(load_tensor1[0].numpy(), obj1[0].numpy()))
    self.assertTrue(np.array_equal(load_tensor1[1], obj1[1]))
    self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1]))
    for i in range(len(load_tensor1)):
        self.assertTrue(
            type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(v.numpy(), load_tensor2['k2'][k].numpy()))
    self.assertTrue(load_tensor2['epoch'] == 123)
    self.assertTrue(
        np.array_equal(load_tensor3[0].numpy(), obj3[0].numpy()))
    self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
                           v.numpy()))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(load_tensor3[2]["opt"][k].numpy(), v.numpy()))
    self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
    # --- dygraph reload as ndarrays (return_numpy=True) ---
    load_array1 = paddle.load(path1, return_numpy=True)
    load_array2 = paddle.load(path2, return_numpy=True)
    load_array3 = paddle.load(path3, return_numpy=True)
    load_array4 = paddle.load(path4, return_numpy=True)
    self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy()))
    self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
    self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
    for i in range(len(load_array1)):
        self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i]))
    for k, v in state_dict.items():
        self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k]))
    self.assertTrue(load_array2['epoch'] == 123)
    self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy()))
    self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(load_array3[2]["state_dict"][k], v.numpy()))
    for k, v in state_dict.items():
        self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy()))
    self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
    # static mode: tensors come back as LoDTensor instead of VarBase
    paddle.enable_static()
    load_tensor1 = paddle.load(path1, return_numpy=False)
    load_tensor2 = paddle.load(path2, return_numpy=False)
    load_tensor3 = paddle.load(path3, return_numpy=False)
    load_tensor4 = paddle.load(path4, return_numpy=False)
    self.assertTrue(
        np.array_equal(np.array(load_tensor1[0]), obj1[0].numpy()))
    self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
    self.assertTrue(np.array_equal(np.array(load_tensor1[2]), obj1[2][1]))
    for i in range(len(load_tensor1)):
        self.assertTrue(
            type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(v.numpy(), np.array(load_tensor2['k2'][k])))
    self.assertTrue(load_tensor2['epoch'] == 123)
    self.assertTrue(
        isinstance(load_tensor3[0], paddle.fluid.core.LoDTensor))
    self.assertTrue(
        np.array_equal(np.array(load_tensor3[0]), obj3[0].numpy()))
    self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
    for k, v in state_dict.items():
        self.assertTrue(
            isinstance(load_tensor3[2]["state_dict"][k],
                       paddle.fluid.core.LoDTensor))
        self.assertTrue(
            np.array_equal(
                np.array(load_tensor3[2]["state_dict"][k]), v.numpy()))
    for k, v in state_dict.items():
        self.assertTrue(
            isinstance(load_tensor3[2]["opt"][k],
                       paddle.fluid.core.LoDTensor))
        self.assertTrue(
            np.array_equal(np.array(load_tensor3[2]["opt"][k]), v.numpy()))
    # NOTE(review): the second argument below is assertTrue's *message*,
    # not a type check -- this line asserts only truthiness, unlike the
    # isinstance checks above; presumably isinstance was intended.
    self.assertTrue(load_tensor4[0], paddle.fluid.core.LoDTensor)
    self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0]))
    load_array1 = paddle.load(path1, return_numpy=True)
    load_array2 = paddle.load(path2, return_numpy=True)
    load_array3 = paddle.load(path3, return_numpy=True)
    load_array4 = paddle.load(path4, return_numpy=True)
    self.assertTrue(np.array_equal(load_array1[0], obj1[0].numpy()))
    self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
    self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
    for i in range(len(load_array1)):
        self.assertTrue(type(load_array1[i]) == type(load_array2['k1'][i]))
    for k, v in state_dict.items():
        self.assertTrue(np.array_equal(v.numpy(), load_array2['k2'][k]))
    self.assertTrue(load_array2['epoch'] == 123)
    self.assertTrue(isinstance(load_array3[0], np.ndarray))
    self.assertTrue(np.array_equal(load_array3[0], obj3[0].numpy()))
    self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
    for k, v in state_dict.items():
        self.assertTrue(
            np.array_equal(load_array3[2]["state_dict"][k], v.numpy()))
    for k, v in state_dict.items():
        self.assertTrue(np.array_equal(load_array3[2]["opt"][k], v.numpy()))
    self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
def test_save_load_complex_object_static_save(self):
    """Mirror of the dygraph-save test: build nested containers from a
    static program's state_dict, save them in static mode, then reload
    in static and dygraph modes with return_numpy False and True.
    """
    paddle.enable_static()
    with new_program_scope():
        # create network
        x = paddle.static.data(
            name="x", shape=[None, IMAGE_SIZE], dtype='float32')
        z = paddle.static.nn.fc(x, 10, bias_attr=False)
        z = paddle.static.nn.fc(z, 128, bias_attr=False)
        loss = fluid.layers.reduce_mean(z)
        place = fluid.CPUPlace(
        ) if not paddle.fluid.core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        prog = paddle.static.default_main_program()
        exe = paddle.static.Executor(place)
        exe.run(paddle.static.default_startup_program())
        state_dict = prog.state_dict()
        keys = list(state_dict.keys())
        # obj1: list mixing a LoDTensor, an ndarray and a (name, ndarray)
        # tuple.
        obj1 = [
            state_dict[keys[0]], np.random.randn(5, 6),
            ('fake_weight', np.ones(
                [7, 8], dtype='float32'))
        ]
        obj2 = {'k1': obj1, 'k2': state_dict, 'epoch': 123}
        # NOTE(review): np.ndarray([3, 4], ...) creates an *uninitialized*
        # array (arbitrary contents), unlike np.random.randn used in the
        # dygraph variant -- presumably fine here since only value
        # round-tripping is checked; confirm if this test ever flakes.
        obj3 = (state_dict[keys[0]], np.ndarray(
            [3, 4], dtype="float32"), {
                "state_dict": state_dict,
                "opt": state_dict
            })
        obj4 = (np.ndarray([3, 4], dtype="float32"), )
        path1 = "test_save_load_any_complex_object_static/obj1"
        path2 = "test_save_load_any_complex_object_static/obj2"
        path3 = "test_save_load_any_complex_object_static/obj3"
        path4 = "test_save_load_any_complex_object_static/obj4"
        paddle.save(obj1, path1)
        paddle.save(obj2, path2)
        paddle.save(obj3, path3)
        paddle.save(obj4, path4)
        # --- static reload as LoDTensors (return_numpy=False) ---
        load_tensor1 = paddle.load(path1, return_numpy=False)
        load_tensor2 = paddle.load(path2, return_numpy=False)
        load_tensor3 = paddle.load(path3, return_numpy=False)
        load_tensor4 = paddle.load(path4, return_numpy=False)
        self.assertTrue(
            np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0])))
        self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
        self.assertTrue(
            np.array_equal(np.array(load_tensor1[2]), obj1[2][1]))
        for i in range(len(load_tensor1)):
            self.assertTrue(
                type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(
                    np.array(v), np.array(load_tensor2['k2'][k])))
        self.assertTrue(load_tensor2['epoch'] == 123)
        self.assertTrue(isinstance(load_tensor3[0], fluid.core.LoDTensor))
        self.assertTrue(np.array_equal(np.array(load_tensor3[0]), obj3[0]))
        self.assertTrue(isinstance(load_tensor3[1], fluid.core.LoDTensor))
        self.assertTrue(np.array_equal(np.array(load_tensor3[1]), obj3[1]))
        for k, v in state_dict.items():
            self.assertTrue(
                isinstance(load_tensor3[2]["state_dict"][k],
                           fluid.core.LoDTensor))
            self.assertTrue(
                np.array_equal(
                    np.array(load_tensor3[2]["state_dict"][k]), np.array(
                        v)))
        for k, v in state_dict.items():
            self.assertTrue(
                isinstance(load_tensor3[2]["opt"][k], fluid.core.LoDTensor))
            self.assertTrue(
                np.array_equal(
                    np.array(load_tensor3[2]["opt"][k]), np.array(v)))
        self.assertTrue(isinstance(load_tensor4[0], fluid.core.LoDTensor))
        self.assertTrue(np.array_equal(np.array(load_tensor4[0]), obj4[0]))
        # --- static reload as ndarrays (return_numpy=True) ---
        load_array1 = paddle.load(path1, return_numpy=True)
        load_array2 = paddle.load(path2, return_numpy=True)
        load_array3 = paddle.load(path3, return_numpy=True)
        load_array4 = paddle.load(path4, return_numpy=True)
        self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0])))
        self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
        self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
        for i in range(len(load_array1)):
            self.assertTrue(
                type(load_array1[i]) == type(load_array2['k1'][i]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(np.array(v), load_array2['k2'][k]))
        self.assertTrue(load_array2['epoch'] == 123)
        self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0])))
        self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(load_array3[2]["state_dict"][k], np.array(
                    v)))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(load_array3[2]["opt"][k], np.array(v)))
        self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
        # dygraph mode: tensors come back as VarBase
        paddle.disable_static()
        load_tensor1 = paddle.load(path1, return_numpy=False)
        load_tensor2 = paddle.load(path2, return_numpy=False)
        load_tensor3 = paddle.load(path3, return_numpy=False)
        load_tensor4 = paddle.load(path4, return_numpy=False)
        self.assertTrue(
            np.array_equal(np.array(load_tensor1[0]), np.array(obj1[0])))
        self.assertTrue(np.array_equal(np.array(load_tensor1[1]), obj1[1]))
        self.assertTrue(np.array_equal(load_tensor1[2].numpy(), obj1[2][1]))
        for i in range(len(load_tensor1)):
            self.assertTrue(
                type(load_tensor1[i]) == type(load_tensor2['k1'][i]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(
                    np.array(v), np.array(load_tensor2['k2'][k])))
        self.assertTrue(load_tensor2['epoch'] == 123)
        self.assertTrue(isinstance(load_tensor3[0], fluid.core.VarBase))
        self.assertTrue(np.array_equal(load_tensor3[0].numpy(), obj3[0]))
        self.assertTrue(isinstance(load_tensor3[1], fluid.core.VarBase))
        self.assertTrue(np.array_equal(load_tensor3[1].numpy(), obj3[1]))
        for k, v in state_dict.items():
            self.assertTrue(
                isinstance(load_tensor3[2]["state_dict"][k],
                           fluid.core.VarBase))
            self.assertTrue(
                np.array_equal(load_tensor3[2]["state_dict"][k].numpy(),
                               np.array(v)))
        for k, v in state_dict.items():
            self.assertTrue(
                isinstance(load_tensor3[2]["opt"][k], fluid.core.VarBase))
            self.assertTrue(
                np.array_equal(load_tensor3[2]["opt"][k].numpy(),
                               np.array(v)))
        self.assertTrue(isinstance(load_tensor4[0], fluid.core.VarBase))
        self.assertTrue(np.array_equal(load_tensor4[0].numpy(), obj4[0]))
        load_array1 = paddle.load(path1, return_numpy=True)
        load_array2 = paddle.load(path2, return_numpy=True)
        load_array3 = paddle.load(path3, return_numpy=True)
        load_array4 = paddle.load(path4, return_numpy=True)
        self.assertTrue(np.array_equal(load_array1[0], np.array(obj1[0])))
        self.assertTrue(np.array_equal(load_array1[1], obj1[1]))
        self.assertTrue(np.array_equal(load_array1[2], obj1[2][1]))
        for i in range(len(load_array1)):
            self.assertTrue(
                type(load_array1[i]) == type(load_array2['k1'][i]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(np.array(v), load_array2['k2'][k]))
        self.assertTrue(load_array2['epoch'] == 123)
        self.assertTrue(np.array_equal(load_array3[0], np.array(obj3[0])))
        self.assertTrue(np.array_equal(load_array3[1], obj3[1]))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(load_array3[2]["state_dict"][k], np.array(
                    v)))
        for k, v in state_dict.items():
            self.assertTrue(
                np.array_equal(load_array3[2]["opt"][k], np.array(v)))
        self.assertTrue(isinstance(load_array4[0], np.ndarray))
        self.assertTrue(np.array_equal(load_array4[0], obj4[0]))
def test_varbase_binary_var(self):
    """A VarBase saved with use_binary_format=True reloads losslessly,
    whether loaded back as an ndarray or as a tensor."""
    paddle.disable_static()
    source = paddle.randn([3, 2], dtype='float32')
    path = 'test_paddle_save_load_varbase_binary_var/varbase'
    paddle.save(source, path, use_binary_format=True)
    reloaded_array = paddle.load(path, return_numpy=True)
    reloaded_tensor = paddle.load(path, return_numpy=False)
    expected = source.numpy()
    reloaded_tensor_array = reloaded_tensor.numpy()
    if paddle.fluid.core.is_compiled_with_cuda():
        # Flush any pending async GPU work before comparing on host.
        fluid.core._cuda_synchronize(paddle.CUDAPlace(0))
    self.assertTrue(np.array_equal(expected, reloaded_array))
    self.assertTrue(np.array_equal(expected, reloaded_tensor_array))
class TestSaveLoad(unittest.TestCase):
    """End-to-end save/load of layer and optimizer state dicts, plus the
    documented error cases of paddle.save / paddle.load."""

    def setUp(self):
        # enable dygraph mode
        paddle.disable_static()
        # config seed so trained weights are reproducible across runs
        paddle.seed(SEED)
        paddle.framework.random._manual_program_seed(SEED)

    def build_and_train_model(self):
        """Build a LinearNet, train it briefly, return (layer, optimizer)."""
        # create network
        layer = LinearNet()
        loss_fn = nn.CrossEntropyLoss()
        adam = opt.Adam(learning_rate=0.001, parameters=layer.parameters())
        # create data loader
        # TODO: using new DataLoader cause unknown Timeout on windows, replace it
        loader = random_batch_reader()
        # train
        train(layer, loader, loss_fn, adam)
        return layer, adam

    def check_load_state_dict(self, orig_dict, load_dict):
        """Assert each entry of orig_dict equals its loaded counterpart.

        Loaded values may be Tensors (have .numpy) or LoDTensors, so the
        comparison normalizes both sides to ndarrays.
        """
        for var_name, value in orig_dict.items():
            load_value = load_dict[var_name].numpy() if hasattr(
                load_dict[var_name], 'numpy') else np.array(load_dict[var_name])
            self.assertTrue(np.array_equal(value.numpy(), load_value))

    def test_save_load(self):
        # NOTE: renamed the local from `opt` to `optimizer` -- the original
        # shadowed the module-level optimizer alias used by
        # build_and_train_model.
        layer, optimizer = self.build_and_train_model()
        # save
        layer_save_path = "test_paddle_save_load.linear.pdparams"
        opt_save_path = "test_paddle_save_load.linear.pdopt"
        layer_state_dict = layer.state_dict()
        opt_state_dict = optimizer.state_dict()
        paddle.save(layer_state_dict, layer_save_path)
        paddle.save(opt_state_dict, opt_save_path)
        # load
        load_layer_state_dict = paddle.load(layer_save_path)
        load_opt_state_dict = paddle.load(opt_save_path)
        self.check_load_state_dict(layer_state_dict, load_layer_state_dict)
        self.check_load_state_dict(opt_state_dict, load_opt_state_dict)
        # test save load in static mode
        paddle.enable_static()
        static_save_path = "static_mode_test/test_paddle_save_load.linear.pdparams"
        paddle.save(layer_state_dict, static_save_path)
        load_static_state_dict = paddle.load(static_save_path)
        self.check_load_state_dict(layer_state_dict, load_static_state_dict)
        # error test cases; some rely on the files saved above
        # 1. the original created `test_list = [1, 2, 3]` here for a
        #    "save obj not dict" error test but never exercised it; the
        #    dead local was removed (no assertion was ever made on it).
        # 2. test save path format error
        with self.assertRaises(ValueError):
            paddle.save(layer_state_dict, "test_paddle_save_load.linear.model/")
        # 3. test load path not exist error
        with self.assertRaises(ValueError):
            paddle.load("test_paddle_save_load.linear.params")
        # 4. test load old save path error
        with self.assertRaises(ValueError):
            paddle.load("test_paddle_save_load.linear")
class TestSaveLoadProgram(unittest.TestCase):
    """Programs saved with paddle.save must deserialize byte-identically."""

    def test_save_load_program(self):
        paddle.enable_static()
        with new_program_scope():
            net = LinearNet()
            feed = paddle.static.data(
                name='x_static_save', shape=(None, IMAGE_SIZE), dtype='float32')
            net(feed)
            main_prog = paddle.static.default_main_program()
            startup_prog = paddle.static.default_startup_program()
            # Snapshot both program descs before saving.
            expected_main = main_prog.desc.serialize_to_string()
            expected_startup = startup_prog.desc.serialize_to_string()
            main_path = "test_paddle_save_load_program/main_program.pdmodel"
            startup_path = "test_paddle_save_load_program/startup_program.pdmodel"
            paddle.save(main_prog, main_path)
            paddle.save(startup_prog, startup_path)
        with new_program_scope():
            # Reload inside a fresh scope and compare serialized descs.
            for expected, path in ((expected_main, main_path),
                                   (expected_startup, startup_path)):
                reloaded = paddle.load(path).desc.serialize_to_string()
                self.assertTrue(expected == reloaded)
class TestSaveLoadLayer(unittest.TestCase):
    """Saving whole Layer objects works in dygraph and is rejected in
    static mode."""

    def test_save_load_layer(self):
        if six.PY2:
            # Layer pickling is not supported under Python 2.
            return
        paddle.disable_static()
        inputs = paddle.randn([1, IMAGE_SIZE], dtype='float32')
        nets = (LinearNet(), LinearNet())
        for net in nets:
            net.eval()
        # Record the reference outputs before saving.
        expected = tuple(net(inputs) for net in nets)
        path = "test_save_load_layer_/layer.pdmodel"
        paddle.save(nets, path)
        # Loading pickled layers must fail in static mode.
        paddle.enable_static()
        with self.assertRaises(ValueError):
            paddle.load(path)
        # In dygraph mode the layers round-trip and reproduce the outputs.
        paddle.disable_static()
        restored = paddle.load(path)
        for want, net in zip(expected, restored):
            self.assertTrue((want - net(inputs)).abs().max() < 1e-10)
if __name__ == '__main__':
    # Allow running this test file directly as a script.
    unittest.main()
import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.unittest.graph import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
# Shared node-attribute templates for every build_graph call in this file.
# Each op node is paired with a same-named '*_data' node, and constant
# inputs additionally get a 'const_*' producer node.
nodes_attributes = {
    'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # ScaleShift layer
    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
    'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    # First Mul and Add pair (scale, then shift)
    'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
    'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
    'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Second Mul and Add pair (used by BatchNorm decomposition)
    'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
    'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
    'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
    'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
    'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
    'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Reshape inserted for a second (non-const) ScaleShift input
    'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
    'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
    'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
    # BatchNorm operation (scale, beta, mean, variance inputs)
    'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
    'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
    'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Concat operation
    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
    'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
    'op_output': {'kind': 'op', 'op': 'Result'}
}
class ScaleShiftToMulAdd(unittest.TestCase):
    """Tests for convert_scale_shift_to_mul_add: each test builds an input
    graph with a ScaleShift op, runs the pass, and compares against a
    reference graph built from the expected Mul/Add decomposition."""

    # ScaleShift -> Mul
    def test_scaleshift_to_mul_1(self):
        """ScaleShift with only weights decomposes into a single Mul."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_data': {}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift 2 inputs-> Mul
    def test_scaleshift2_to_mul(self):
        """ScaleShift with a second (dynamic) input becomes Reshape + Mul."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_2', 'placeholder_2_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('placeholder_2_data', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'placeholder_2_data': {'shape': np.array([1, 227])},
                             'scaleshift_1_data': {}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_2', 'placeholder_2_data'),
                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
                                 ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('placeholder_2/Reshape_data', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'placeholder_2_data': {'shape': np.array([1, 227])},
                                 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift 2 inputs-> Mul (axis = 1)
    def test_scaleshift2_axis1_to_mul(self):
        """axis=1 second input is reshaped to [1, C, 1, 1] before Mul."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_2', 'placeholder_2_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('placeholder_2_data', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'placeholder_2_data': {'shape': np.array([227])},
                             'scaleshift_1': {'axis': 1},
                             'scaleshift_1_data': {}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_2', 'placeholder_2_data'),
                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
                                 ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('placeholder_2/Reshape_data', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'placeholder_2_data': {'shape': np.array([227])},
                                 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> Mul (Zero biases)
    def test_scaleshift_to_mul_2(self):
        """All-zero biases are dropped: only the Mul survives."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1_data': {}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> Mul->Add
    def test_scaleshift_to_mul_add(self):
        """Non-trivial weights and biases yield the full Mul -> Add chain."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                             'scaleshift_1_data': {}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                                 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'add_1': {'can_be_fused': True},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> None (Zero weights and biases)
    def test_scaleshift_to_nothing(self):
        """Identity weights (ones) and zero biases remove the op entirely."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                             }, nodes_with_edges_only=True)

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}
                                , nodes_with_edges_only=True)

        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> ScaleShift (can_be_fused=False)
    def test_scaleshift_can_be_fused(self):
        """can_be_fused=False must leave the ScaleShift untouched."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1': {'can_be_fused': False},
                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                             })

        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'scaleshift_1'),
                                 ('const_scaleshift_1_w', 'scaleshift_1_w'),
                                 ('const_scaleshift_1_b', 'scaleshift_1_b'),
                                 ('scaleshift_1_w', 'scaleshift_1'),
                                 ('scaleshift_1_b', 'scaleshift_1'),
                                 ('scaleshift_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                                 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                                 'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                                 'scaleshift_1': {'can_be_fused': False},
                                 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                                 })

        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()

        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
        self.assertTrue(flag, resp)
class BatchNormDecomposition(unittest.TestCase):
def test_bn_decomposition_1(self):
    """BatchNorm decomposes into the normalize/restore chain
    Mul(1/sqrt(var+eps)) -> Add(-mean/sqrt(var+eps)) -> Mul(gamma)
    -> Add(beta); reference weights below are those precomputed values
    for var=[1,2,3], mean=[1,2,3], eps=1.2."""
    graph = build_graph(nodes_attributes,
                        [('placeholder_1', 'placeholder_1_data'),
                         ('placeholder_1_data', 'bn_op'),
                         ('const_bn_const', 'bn_const'),
                         ('const_bn_beta', 'bn_beta'),
                         ('const_bn_mean', 'bn_mean'),
                         ('const_bn_var', 'bn_var'),
                         ('bn_const', 'bn_op'),
                         ('bn_beta', 'bn_op'),
                         ('bn_mean', 'bn_op'),
                         ('bn_var', 'bn_op'),
                         ('bn_op', 'bn_data'),
                         ('concat', 'concat_data'),
                         ('bn_data', 'concat'),
                         ('concat_data', 'op_output')
                         ],
                        {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                         'bn_op': {'eps': 1.2},
                         'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                         'bn_data': {'shape': np.array([1, 227, 227, 3])},
                         'concat_data': {}
                         }, nodes_with_edges_only=True)

    graph_ref = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'mul_1'),
                             ('const_mul_1_w', 'mul_1_w'),
                             ('mul_1_w', 'mul_1'),
                             ('mul_1', 'mul_1_data'),
                             ('mul_1_data', 'add_1'),
                             ('const_add_1_w', 'add_1_w'),
                             ('add_1_w', 'add_1'),
                             ('add_1', 'add_1_data'),
                             ('add_1_data', 'mul_2'),
                             ('const_mul_2_w', 'mul_2_w'),
                             ('mul_2_w', 'mul_2'),
                             ('mul_2', 'mul_2_data'),
                             ('mul_2_data', 'add_2'),
                             ('const_add_2_w', 'add_2_w'),
                             ('add_2_w', 'add_2'),
                             ('add_2', 'add_2_data'),
                             ('concat', 'concat_data'),
                             ('add_2_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'const_mul_1_w': {'shape': np.array([3]),
                                               'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                             'mul_1_w': {'shape': np.array([3]),
                                         'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                             'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'const_add_1_w': {'shape': np.array([3]),
                                               'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                             'add_1_w': {'shape': np.array([3]),
                                         'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                             'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                             'mul_1': {'can_be_fused': True},
                             'mul_2': {'can_be_fused': True},
                             'add_1': {'can_be_fused': True},
                             'add_2': {'can_be_fused': True},
                             'concat_data': {}
                             }, nodes_with_edges_only=True)

    graph.graph['layout'] = 'NHWC'
    convert_batch_norm(graph)
    graph.clean_up()

    (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
    self.assertTrue(flag, resp)
# 'can_be_fused': False for BatchNorm
    # 'can_be_fused': False for BatchNorm
    def test_bn_decomposition_2(self):
        """BatchNorm marked can_be_fused=False must still be decomposed into
        the Mul->Add->Mul->Add chain, and every produced Mul/Add has to
        inherit can_be_fused=False so later fusing passes skip the chain."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'bn_op'),
                             ('const_bn_const', 'bn_const'),
                             ('const_bn_beta', 'bn_beta'),
                             ('const_bn_mean', 'bn_mean'),
                             ('const_bn_var', 'bn_var'),
                             ('bn_const', 'bn_op'),
                             ('bn_beta', 'bn_op'),
                             ('bn_mean', 'bn_op'),
                             ('bn_var', 'bn_op'),
                             ('bn_op', 'bn_data'),
                             ('concat', 'concat_data'),
                             ('bn_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'bn_op': {'eps': 1.2, 'can_be_fused': False},
                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
                             'concat_data': {}
                             })
        # Reference: the same decomposition as in test_bn_decomposition_1 but
        # with can_be_fused=False propagated onto every Mul/Add node.
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'add_1_data'),
                                 ('add_1_data', 'mul_2'),
                                 ('const_mul_2_w', 'mul_2_w'),
                                 ('mul_2_w', 'mul_2'),
                                 ('mul_2', 'mul_2_data'),
                                 ('mul_2_data', 'add_2'),
                                 ('const_add_2_w', 'add_2_w'),
                                 ('add_2_w', 'add_2'),
                                 ('add_2', 'add_2_data'),
                                 ('concat', 'concat_data'),
                                 ('add_2_data', 'concat'),
                                 ('concat_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 # mul_1/add_1 hold 1/sqrt(var + eps) and -mean/sqrt(var + eps)
                                 # (the frozen-statistics normalization part).
                                 'const_mul_1_w': {'shape': np.array([3]),
                                                   'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'mul_1_w': {'shape': np.array([3]),
                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]),
                                                   'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'add_1_w': {'shape': np.array([3]),
                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                                 'mul_1': {'can_be_fused': False},
                                 'mul_2': {'can_be_fused': False},
                                 'add_1': {'can_be_fused': False},
                                 'add_2': {'can_be_fused': False},
                                 'concat_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_batch_norm(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
self.assertTrue(flag, resp) | model-optimizer/mo/middle/passes/fusing/decomposition_test.py | import unittest
import numpy as np
from mo.middle.passes.fusing.decomposition import convert_scale_shift_to_mul_add, convert_batch_norm
from mo.utils.unittest.graph import build_graph
from mo.utils.ir_engine.compare_graphs import compare_graphs
# Shared node-attribute templates used by every test graph below.
# Naming convention: '<name>' is an op node, '<name>_data' its output data
# node, and 'const_<name>*' the constant producer feeding a weight input.
nodes_attributes = {
    'placeholder_1': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_1_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    'placeholder_2': {'shape': None, 'type': 'Parameter', 'kind': 'op', 'op': 'Parameter'},
    'placeholder_2_data': {'value': None, 'shape': None, 'kind': 'data', 'data_type': None},
    # ScaleShift layer
    'scaleshift_1': {'type': 'ScaleShift', 'kind': 'op', 'op': 'ScaleShift', 'axis': 0},
    'const_scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'scaleshift_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'const_scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'op'},
    'scaleshift_1_b': {'value': None, 'shape': None, 'kind': 'data'},
    'scaleshift_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Mul and Add operations (first pair produced by the decompositions)
    'mul_1': {'type': None, 'value': None, 'kind': 'op', 'op': 'Mul'},
    'const_mul_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'mul_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'mul_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    'add_1': {'type': None, 'kind': 'op', 'op': 'Add'},
    'const_add_1_w': {'value': None, 'shape': None, 'kind': 'op'},
    'add_1_w': {'value': None, 'shape': None, 'kind': 'data'},
    'add_1_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Mul and Add operations (second pair, used by the BatchNorm decomposition)
    'mul_2': {'type': None, 'kind': 'op', 'op': 'Mul'},
    'const_mul_2_w': {'value': None, 'shape': None, 'kind': 'op'},
    'mul_2_w': {'value': None, 'shape': None, 'kind': 'data'},
    'mul_2_data': {'value': None, 'shape': None, 'kind': 'data'},
    'add_2': {'type': None, 'kind': 'op', 'op': 'Add'},
    'const_add_2_w': {'value': None, 'shape': None, 'kind': 'op'},
    'add_2_w': {'value': None, 'shape': None, 'kind': 'data'},
    'add_2_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Reshape (inserted for a 2nd-input ScaleShift to align broadcast axes)
    'placeholder_2/Reshape_': {'type': 'Reshape', 'kind': 'op', 'op': 'Reshape'},
    'placeholder_2/Reshape_data': {'value': None, 'shape': None, 'kind': 'data'},
    'placeholder_2/Reshape_const': {'type': 'Const', 'kind': 'op', 'op': 'Const', 'value': None},
    'placeholder_2/Reshape_const_data': {'kind': 'data', 'value': None, 'shape': None},
    # BatchNorm operation
    'bn_op': {'type': None, 'kind': 'op', 'op': 'BatchNorm', 'can_be_fused': True},
    'const_bn_const': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_const': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_beta': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_beta': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_mean': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_mean': {'value': None, 'shape': None, 'kind': 'data'},
    'const_bn_var': {'value': None, 'shape': None, 'kind': 'op'},
    'bn_var': {'value': None, 'shape': None, 'kind': 'data'},
    'bn_data': {'value': None, 'shape': None, 'kind': 'data'},
    # Concat1 operation (sink that keeps the result alive for comparison)
    'concat': {'type': 'Concat', 'kind': 'op', 'op': 'Concat'},
    'concat_data': {'value': None, 'shape': None, 'kind': 'data'},
    'op_output': {'kind': 'op', 'op': 'Result'}
}
class ScaleShiftToMulAdd(unittest.TestCase):
    """Tests for convert_scale_shift_to_mul_add().

    Each test builds a graph containing a ScaleShift op, runs the pass plus
    clean_up(), and compares the result against a hand-built reference graph
    with compare_graphs().
    """

    # ScaleShift -> Mul
    def test_scaleshift_to_mul_1(self):
        """ScaleShift with only a weights input becomes a single Mul."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift 2 inputs-> Mul
    def test_scaleshift2_to_mul(self):
        """ScaleShift whose scale comes from a second data input becomes a Mul
        with a Reshape inserted to align the broadcast axes (axis=0)."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_2', 'placeholder_2_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('placeholder_2_data', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'placeholder_2_data': {'shape': np.array([1, 227])},
                             'scaleshift_1_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_2', 'placeholder_2_data'),
                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
                                 ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('placeholder_2/Reshape_data', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'placeholder_2_data': {'shape': np.array([1, 227])},
                                 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift 2 inputs-> Mul (axis = 1)
    def test_scaleshift2_axis1_to_mul(self):
        """Same as above but with a 1-D scale input and axis=1: the inserted
        Reshape must place the 227 channels on axis 1."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_2', 'placeholder_2_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('placeholder_2_data', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'placeholder_2_data': {'shape': np.array([227])},
                             'scaleshift_1': {'axis': 1},
                             'scaleshift_1_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_2', 'placeholder_2_data'),
                                 ('placeholder_2_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_const', 'placeholder_2/Reshape_const_data'),
                                 ('placeholder_2/Reshape_const_data', 'placeholder_2/Reshape_'),
                                 ('placeholder_2/Reshape_', 'placeholder_2/Reshape_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('placeholder_2/Reshape_data', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'placeholder_2_data': {'shape': np.array([227])},
                                 'placeholder_2/Reshape_const': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_const_data': {'value': np.array([1, 227, 1, 1]), 'shape': [4]},
                                 'placeholder_2/Reshape_data': {'shape': np.array([1, 227, 1, 1])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> Mul (Zero biases)
    def test_scaleshift_to_mul_2(self):
        """All-zero biases are dropped: only the Mul survives, no Add."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> Mul->Add
    def test_scaleshift_to_mul_add(self):
        """Non-trivial weights and biases produce the full Mul->Add chain."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                             'scaleshift_1_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_1_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                                 'add_1_w': {'shape': np.array([3]), 'value': np.array([3, 2, 1])},
                                 'mul_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'add_1': {'can_be_fused': True},
                                 'mul_1': {'can_be_fused': True},
                                 'scaleshift_1_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> None (Zero weights and biases)
    def test_scaleshift_to_nothing(self):
        """Identity ScaleShift (weights=1, biases=0) is removed entirely."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                             }, nodes_with_edges_only=True)
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])}}
                                , nodes_with_edges_only=True)
        graph.graph['layout'] = 'NHWC'
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'placeholder_1')
        self.assertTrue(flag, resp)

    # ScaleShift -> ScaleShift (can_be_fused=False)
    def test_scaleshift_can_be_fused(self):
        """ScaleShift with can_be_fused=False must be left untouched."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'scaleshift_1'),
                             ('const_scaleshift_1_w', 'scaleshift_1_w'),
                             ('const_scaleshift_1_b', 'scaleshift_1_b'),
                             ('scaleshift_1_w', 'scaleshift_1'),
                             ('scaleshift_1_b', 'scaleshift_1'),
                             ('scaleshift_1', 'scaleshift_1_data'),
                             ('scaleshift_1_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                             'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                             'scaleshift_1': {'can_be_fused': False},
                             'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'scaleshift_1'),
                                 ('const_scaleshift_1_w', 'scaleshift_1_w'),
                                 ('const_scaleshift_1_b', 'scaleshift_1_b'),
                                 ('scaleshift_1_w', 'scaleshift_1'),
                                 ('scaleshift_1_b', 'scaleshift_1'),
                                 ('scaleshift_1', 'scaleshift_1_data'),
                                 ('scaleshift_1_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                                 'scaleshift_1_w': {'shape': np.array([3]), 'value': np.array([1, 1, 1])},
                                 'const_scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                                 'scaleshift_1_b': {'shape': np.array([3]), 'value': np.array([0, 0, 0])},
                                 'scaleshift_1': {'can_be_fused': False},
                                 'scaleshift_1_data': {'shape': np.array([1, 227, 227, 3])}
                                 })
        # NOTE: no layout is set here — the pass must bail out before it needs one.
        convert_scale_shift_to_mul_add(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'scaleshift_1_data')
        self.assertTrue(flag, resp)
class BatchNormDecomposition(unittest.TestCase):
    """Tests for convert_batch_norm(): a frozen BatchNorm (inputs: gamma,
    beta, mean, variance) is decomposed into a Mul->Add->Mul->Add chain."""

    def test_bn_decomposition_1(self):
        """Default (fusable) BatchNorm: the produced Mul/Add nodes keep
        can_be_fused=True."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'bn_op'),
                             ('const_bn_const', 'bn_const'),
                             ('const_bn_beta', 'bn_beta'),
                             ('const_bn_mean', 'bn_mean'),
                             ('const_bn_var', 'bn_var'),
                             ('bn_const', 'bn_op'),
                             ('bn_beta', 'bn_op'),
                             ('bn_mean', 'bn_op'),
                             ('bn_var', 'bn_op'),
                             ('bn_op', 'bn_data'),
                             ('concat', 'concat_data'),
                             ('bn_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'bn_op': {'eps': 1.2},
                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
                             'concat_data': {}
                             }, nodes_with_edges_only=True)
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'add_1_data'),
                                 ('add_1_data', 'mul_2'),
                                 ('const_mul_2_w', 'mul_2_w'),
                                 ('mul_2_w', 'mul_2'),
                                 ('mul_2', 'mul_2_data'),
                                 ('mul_2_data', 'add_2'),
                                 ('const_add_2_w', 'add_2_w'),
                                 ('add_2_w', 'add_2'),
                                 ('add_2', 'add_2_data'),
                                 ('concat', 'concat_data'),
                                 ('add_2_data', 'concat'),
                                 ('concat_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 # mul_1/add_1: 1/sqrt(var + eps) and -mean/sqrt(var + eps);
                                 # mul_2/add_2: gamma and beta.
                                 'const_mul_1_w': {'shape': np.array([3]),
                                                   'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'mul_1_w': {'shape': np.array([3]),
                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]),
                                                   'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'add_1_w': {'shape': np.array([3]),
                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                                 'mul_1': {'can_be_fused': True},
                                 'mul_2': {'can_be_fused': True},
                                 'add_1': {'can_be_fused': True},
                                 'add_2': {'can_be_fused': True},
                                 'concat_data': {}
                                 }, nodes_with_edges_only=True)
        graph.graph['layout'] = 'NHWC'
        convert_batch_norm(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
        self.assertTrue(flag, resp)

    # 'can_be_fused': False for BatchNorm
    def test_bn_decomposition_2(self):
        """BatchNorm with can_be_fused=False is still decomposed, but every
        produced Mul/Add inherits can_be_fused=False."""
        graph = build_graph(nodes_attributes,
                            [('placeholder_1', 'placeholder_1_data'),
                             ('placeholder_1_data', 'bn_op'),
                             ('const_bn_const', 'bn_const'),
                             ('const_bn_beta', 'bn_beta'),
                             ('const_bn_mean', 'bn_mean'),
                             ('const_bn_var', 'bn_var'),
                             ('bn_const', 'bn_op'),
                             ('bn_beta', 'bn_op'),
                             ('bn_mean', 'bn_op'),
                             ('bn_var', 'bn_op'),
                             ('bn_op', 'bn_data'),
                             ('concat', 'concat_data'),
                             ('bn_data', 'concat'),
                             ('concat_data', 'op_output')
                             ],
                            {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                             'bn_op': {'eps': 1.2, 'can_be_fused': False},
                             'bn_const': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_beta': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_mean': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_var': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                             'bn_data': {'shape': np.array([1, 227, 227, 3])},
                             'concat_data': {}
                             })
        graph_ref = build_graph(nodes_attributes,
                                [('placeholder_1', 'placeholder_1_data'),
                                 ('placeholder_1_data', 'mul_1'),
                                 ('const_mul_1_w', 'mul_1_w'),
                                 ('mul_1_w', 'mul_1'),
                                 ('mul_1', 'mul_1_data'),
                                 ('mul_1_data', 'add_1'),
                                 ('const_add_1_w', 'add_1_w'),
                                 ('add_1_w', 'add_1'),
                                 ('add_1', 'add_1_data'),
                                 ('add_1_data', 'mul_2'),
                                 ('const_mul_2_w', 'mul_2_w'),
                                 ('mul_2_w', 'mul_2'),
                                 ('mul_2', 'mul_2_data'),
                                 ('mul_2_data', 'add_2'),
                                 ('const_add_2_w', 'add_2_w'),
                                 ('add_2_w', 'add_2'),
                                 ('add_2', 'add_2_data'),
                                 ('concat', 'concat_data'),
                                 ('add_2_data', 'concat'),
                                 ('concat_data', 'op_output')
                                 ],
                                {'placeholder_1_data': {'shape': np.array([1, 227, 227, 3])},
                                 'const_mul_1_w': {'shape': np.array([3]),
                                                   'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'mul_1_w': {'shape': np.array([3]),
                                             'value': np.array([0.67419986, 0.55901699, 0.48795004])},
                                 'const_mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'mul_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'const_add_1_w': {'shape': np.array([3]),
                                                   'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'add_1_w': {'shape': np.array([3]),
                                             'value': np.array([-0.67419986, -1.11803399, -1.46385011])},
                                 'const_add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_w': {'shape': np.array([3]), 'value': np.array([1, 2, 3])},
                                 'add_2_data': {'shape': np.array([1, 227, 227, 3])},
                                 'mul_1': {'can_be_fused': False},
                                 'mul_2': {'can_be_fused': False},
                                 'add_1': {'can_be_fused': False},
                                 'add_2': {'can_be_fused': False},
                                 'concat_data': {}
                                 })
        graph.graph['layout'] = 'NHWC'
        convert_batch_norm(graph)
        graph.clean_up()
        (flag, resp) = compare_graphs(graph, graph_ref, 'concat_data')
self.assertTrue(flag, resp) | 0.557966 | 0.396068 |
import six
import tensorflow as tf
from deeplab import common
from deeplab import model
from deeplab.datasets import segmentation_dataset
from deeplab.utils import input_generator
from deeplab.utils import train_utils
from deployment import model_deploy
import numpy as np
# Module-level aliases and command-line flag definitions for DeepLab training.
slim = tf.contrib.slim
prefetch_queue = slim.prefetch_queue
flags = tf.app.flags
FLAGS = flags.FLAGS

# Settings for multi-GPUs/multi-replicas training.
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.')
flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.')
flags.DEFINE_integer('startup_delay_steps', 15,
                     'Number of training steps between replicas startup.')
flags.DEFINE_integer('num_ps_tasks', 0,
                     'The number of parameter servers. If the value is 0, then '
                     'the parameters are handled locally by the worker.')
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID.')

# Settings for logging.
flags.DEFINE_string('train_logdir', None,
                    'Where the checkpoint and logs are stored.')
flags.DEFINE_integer('log_steps', 10,
                     'Display logging information at every log_steps.')
flags.DEFINE_integer('save_interval_secs', 1200,
                     'How often, in seconds, we save the model to disk.')
flags.DEFINE_integer('save_summaries_secs', 600,
                     'How often, in seconds, we compute the summaries.')
flags.DEFINE_boolean('save_summaries_images', False,
                     'Save sample inputs, labels, and semantic predictions as '
                     'images to summary.')

# Settings for training strategy.
flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
                  'Learning rate policy for training.')
# Use 0.007 when training on PASCAL augmented training set, train_aug. When
# fine-tuning on PASCAL trainval set, use learning rate=0.0001.
flags.DEFINE_float('base_learning_rate', .0001,
                   'The base learning rate for model training.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1,
                   'The rate to decay the base learning rate.')
flags.DEFINE_integer('learning_rate_decay_step', 2000,
                     'Decay the base learning rate at a fixed step.')
flags.DEFINE_float('learning_power', 0.9,
                   'The power value used in the poly learning policy.')
flags.DEFINE_integer('training_number_of_steps', 30000,
                     'The number of steps used for training')
flags.DEFINE_float('momentum', 0.9, 'The momentum value to use')
# When fine_tune_batch_norm=True, use at least batch size larger than 12
# (batch size more than 16 is better). Otherwise, one could use smaller batch
# size and set fine_tune_batch_norm=False.
flags.DEFINE_integer('train_batch_size', 8,
                     'The number of images in each batch during training.')
# For weight_decay, use 0.00004 for MobileNet-V2 or Xception model variants.
# Use 0.0001 for ResNet model variants.
flags.DEFINE_float('weight_decay', 0.00004,
                   'The value of the weight decay for training.')
flags.DEFINE_multi_integer('train_crop_size', [513, 513],
                           'Image crop size [height, width] during training.')
flags.DEFINE_float('last_layer_gradient_multiplier', 1.0,
                   'The gradient multiplier for last layers, which is used to '
                   'boost the gradient of last layers if the value > 1.')
flags.DEFINE_boolean('upsample_logits', True,
                     'Upsample logits during training.')

# Settings for fine-tuning the network.
flags.DEFINE_string('tf_initial_checkpoint', None,
                    'The initial checkpoint in tensorflow format.')
# Set to False if one does not want to re-use the trained classifier weights.
flags.DEFINE_boolean('initialize_last_layer', False,
                     'Initialize the last layer.')
flags.DEFINE_boolean('last_layers_contain_logits_only', False,
                     'Only consider logits as last layers or not.')
flags.DEFINE_integer('slow_start_step', 0,
                     'Training model with small learning rate for few steps.')
flags.DEFINE_float('slow_start_learning_rate', 1e-4,
                   'Learning rate employed during slow start.')
# Set to True if one wants to fine-tune the batch norm parameters in DeepLabv3.
# Set to False and use small batch size to save GPU memory.
flags.DEFINE_boolean('fine_tune_batch_norm', True,
                     'Fine tune the batch norm parameters or not.')
flags.DEFINE_float('min_scale_factor', 0.5,
                   'Minimum scale factor for data augmentation.')
flags.DEFINE_float('max_scale_factor', 2.,
                   'Maximum scale factor for data augmentation.')
flags.DEFINE_float('scale_factor_step_size', 0.25,
                   'Scale factor step size for data augmentation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
                           'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
                     'The ratio of input to output spatial resolution.')

# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
                    'Name of the segmentation dataset.')
flags.DEFINE_string('train_split', 'train',
                    'Which split of the dataset to be used for training')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset resides.')
def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
  """Builds a clone of DeepLab.
  Args:
    inputs_queue: A prefetch queue for images and labels.
    outputs_to_num_classes: A map from output type to the number of classes.
      For example, for the task of semantic segmentation with 21 semantic
      classes, we would have outputs_to_num_classes['semantic'] = 21.
    ignore_label: Ignore label.
  Returns:
    A map of maps from output_type (e.g., semantic prediction) to a
    dictionary of multi-scale logits names to logits. For each output_type,
    the dictionary has keys which correspond to the scales and values which
    correspond to the logits. For example, if `scales` equals [1.0, 1.5],
    then the keys would include 'merged_logits', 'logits_1.00' and
    'logits_1.50'.
  """
  samples = inputs_queue.dequeue()
  # Add name to input and label nodes so we can add to summary.
  samples[common.IMAGE] = tf.identity(
      samples[common.IMAGE], name=common.IMAGE)
  samples[common.LABEL] = tf.identity(
      samples[common.LABEL], name=common.LABEL)
  model_options = common.ModelOptions(
      outputs_to_num_classes=outputs_to_num_classes,
      crop_size=FLAGS.train_crop_size,
      atrous_rates=FLAGS.atrous_rates,
      output_stride=FLAGS.output_stride)
  print('model options: ', model_options)
  # FLAGS.image_pyramid is not defined in this file; presumably it comes from
  # deeplab.common — verify before changing.
  outputs_to_scales_to_logits = model.multi_scale_logits(
      samples[common.IMAGE],
      model_options=model_options,
      image_pyramid=FLAGS.image_pyramid,
      weight_decay=FLAGS.weight_decay,
      is_training=True,
      fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)
  # Add name to graph node so we can add to summary.
  output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
  output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
      output_type_dict[model.MERGED_LOGITS_SCOPE],
      name=common.OUTPUT_TYPE)
  # Attach one loss per output type; the losses land in tf.GraphKeys.LOSSES
  # and are picked up by optimize_clones() in main().
  for output, num_classes in six.iteritems(outputs_to_num_classes):
    train_utils.my_mixed_loss(
        outputs_to_scales_to_logits[output],
        samples[common.LABEL],
        num_classes,
        ignore_label,
        loss_weight=1.0,
        upsample_logits=FLAGS.upsample_logits,
        scope=output)
  return outputs_to_scales_to_logits
def main(unused_argv):
  """Entry point: builds the training graph and runs slim training.

  All configuration is read from FLAGS; `train_logdir`,
  `tf_initial_checkpoint` and `dataset_dir` must be set (enforced in the
  `__main__` guard).
  """
  tf.logging.set_verbosity(tf.logging.INFO)
  # Set up deployment (i.e., multi-GPUs and/or multi-replicas).
  config = model_deploy.DeploymentConfig(
      num_clones=FLAGS.num_clones,
      clone_on_cpu=FLAGS.clone_on_cpu,
      replica_id=FLAGS.task,
      num_replicas=FLAGS.num_replicas,
      num_ps_tasks=FLAGS.num_ps_tasks)
  # Split the batch across GPUs.
  # NOTE: fixed typo in the error message ('divisble' -> 'divisible').
  assert FLAGS.train_batch_size % config.num_clones == 0, (
      'Training batch size not divisible by number of clones (GPUs).')
  clone_batch_size = FLAGS.train_batch_size // config.num_clones
  # Get dataset-dependent information.
  dataset = segmentation_dataset.get_dataset(
      FLAGS.dataset, FLAGS.train_split, dataset_dir=FLAGS.dataset_dir)
  tf.gfile.MakeDirs(FLAGS.train_logdir)
  tf.logging.info('Training on %s set', FLAGS.train_split)
  with tf.Graph().as_default() as graph:
    with tf.device(config.inputs_device()):
      samples = input_generator.get(
          dataset,
          FLAGS.train_crop_size,
          clone_batch_size,
          min_resize_value=FLAGS.min_resize_value,
          max_resize_value=FLAGS.max_resize_value,
          resize_factor=FLAGS.resize_factor,
          min_scale_factor=FLAGS.min_scale_factor,
          max_scale_factor=FLAGS.max_scale_factor,
          scale_factor_step_size=FLAGS.scale_factor_step_size,
          dataset_split=FLAGS.train_split,
          is_training=True,
          model_variant=FLAGS.model_variant)
      inputs_queue = prefetch_queue.prefetch_queue(
          samples, capacity=128 * config.num_clones)
    # Create the global step on the device storing the variables.
    with tf.device(config.variables_device()):
      global_step = tf.train.get_or_create_global_step()
      # Define the model and create clones.
      model_fn = _build_deeplab
      model_args = (inputs_queue, {
          common.OUTPUT_TYPE: dataset.num_classes
      }, dataset.ignore_label)
      clones = model_deploy.create_clones(config, model_fn, args=model_args)
      # Gather update_ops from the first clone. These contain, for example,
      # the updates for the batch_norm variables created by model_fn.
      first_clone_scope = config.clone_scope(0)
      update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
    # Gather initial summaries.
    summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
    # Add summaries for model variables. (A second, redundant copy of this
    # loop used to run after the optimizer was built, producing duplicate
    # '_1'-suffixed histogram ops; it has been removed.)
    for model_var in slim.get_model_variables():
      summaries.add(tf.summary.histogram(model_var.op.name, model_var))
    # Add summaries for images, labels, semantic predictions
    if FLAGS.save_summaries_images:
      summary_image = graph.get_tensor_by_name(
          ('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/'))
      summaries.add(
          tf.summary.image('samples/%s' % common.IMAGE, summary_image))
      first_clone_label = graph.get_tensor_by_name(
          ('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/'))
      # Scale up summary image pixel values for better visualization.
      pixel_scaling = max(1, 255 // dataset.num_classes)
      summary_label = tf.cast(first_clone_label * pixel_scaling, tf.uint8)
      summaries.add(
          tf.summary.image('samples/%s' % common.LABEL, summary_label))
      first_clone_output = graph.get_tensor_by_name(
          ('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/'))
      predictions = tf.expand_dims(tf.argmax(first_clone_output, 3), -1)
      summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8)
      summaries.add(
          tf.summary.image(
              'samples/%s' % common.OUTPUT_TYPE, summary_predictions))
    # Add summaries for losses.
    for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
      summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
    # Build the optimizer based on the device specification.
    with tf.device(config.optimizer_device()):
      learning_rate = train_utils.get_model_learning_rate(
          FLAGS.learning_policy, FLAGS.base_learning_rate,
          FLAGS.learning_rate_decay_step, FLAGS.learning_rate_decay_factor,
          FLAGS.training_number_of_steps, FLAGS.learning_power,
          FLAGS.slow_start_step, FLAGS.slow_start_learning_rate)
      optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
      summaries.add(tf.summary.scalar('learning_rate', learning_rate))
      # Register the learning rate in the 'debugging' collection so it is
      # printed via the slim scalar summaries added below.
      train_utils.addc(tf.identity(learning_rate, 'learning_rate'))
    startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps
    with tf.device(config.variables_device()):
      total_loss, grads_and_vars = model_deploy.optimize_clones(
          clones, optimizer)
      total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.')
      summaries.add(tf.summary.scalar('total_loss', total_loss))
      # Modify the gradients for biases and last layer variables.
      last_layers = model.get_extra_layer_scopes(
          FLAGS.last_layers_contain_logits_only)
      grad_mult = train_utils.get_model_gradient_multipliers(
          last_layers, FLAGS.last_layer_gradient_multiplier)
      if grad_mult:
        grads_and_vars = slim.learning.multiply_gradients(
            grads_and_vars, grad_mult)
      # Create gradient update op.
      grad_updates = optimizer.apply_gradients(
          grads_and_vars, global_step=global_step)
      update_ops.append(grad_updates)
      update_op = tf.group(*update_ops)
      # Debug output: list trainable variables and the total parameter count.
      print(tf.trainable_variables())
      print('[DEBUGGING]::the number of parameters: ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
      with tf.control_dependencies([update_op]):
        train_tensor = tf.identity(total_loss, name='train_op')
    # Print every tensor registered in the 'debugging' collection at each step.
    for probe in tf.get_collection("debugging"):
      slim.summaries.add_scalar_summary(probe, name=probe.op.name, print_summary=True)
    # Add the summaries from the first clone. These contain the summaries
    # created by model_fn and either optimize_clones() or _gather_clone_loss().
    summaries |= set(
        tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
    # Merge all summaries together.
    summary_op = tf.summary.merge(list(summaries))
    # Soft placement allows placing on CPU ops without GPU implementation.
    session_config = tf.ConfigProto(
        allow_soft_placement=True, log_device_placement=False)
    # Start the training.
    slim.learning.train(
        train_tensor,
        logdir=FLAGS.train_logdir,
        log_every_n_steps=FLAGS.log_steps,
        master=FLAGS.master,
        number_of_steps=FLAGS.training_number_of_steps,
        is_chief=(FLAGS.task == 0),
        session_config=session_config,
        startup_delay_steps=startup_delay_steps,
        init_fn=train_utils.get_model_init_fn(
            FLAGS.train_logdir,
            FLAGS.tf_initial_checkpoint,
            FLAGS.initialize_last_layer,
            last_layers,
            ignore_missing_vars=True),
        summary_op=summary_op,
        save_summaries_secs=FLAGS.save_summaries_secs,
        save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
  # These flags have no usable defaults — fail fast if any is missing.
  flags.mark_flag_as_required('train_logdir')
  flags.mark_flag_as_required('tf_initial_checkpoint')
  flags.mark_flag_as_required('dataset_dir')
tf.app.run() | research/deeplab/train.py | import six
import tensorflow as tf
from deeplab import common
from deeplab import model
from deeplab.datasets import segmentation_dataset
from deeplab.utils import input_generator
from deeplab.utils import train_utils
from deployment import model_deploy
import numpy as np
slim = tf.contrib.slim
prefetch_queue = slim.prefetch_queue
flags = tf.app.flags
FLAGS = flags.FLAGS
# Settings for multi-GPUs/multi-replicas training.
flags.DEFINE_integer('num_clones', 1, 'Number of clones to deploy.')
flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
flags.DEFINE_integer('num_replicas', 1, 'Number of worker replicas.')
flags.DEFINE_integer('startup_delay_steps', 15,
'Number of training steps between replicas startup.')
flags.DEFINE_integer('num_ps_tasks', 0,
'The number of parameter servers. If the value is 0, then '
'the parameters are handled locally by the worker.')
flags.DEFINE_string('master', '', 'BNS name of the tensorflow server')
flags.DEFINE_integer('task', 0, 'The task ID.')
# Settings for logging.
flags.DEFINE_string('train_logdir', None,
'Where the checkpoint and logs are stored.')
flags.DEFINE_integer('log_steps', 10,
'Display logging information at every log_steps.')
flags.DEFINE_integer('save_interval_secs', 1200,
'How often, in seconds, we save the model to disk.')
flags.DEFINE_integer('save_summaries_secs', 600,
'How often, in seconds, we compute the summaries.')
flags.DEFINE_boolean('save_summaries_images', False,
'Save sample inputs, labels, and semantic predictions as '
'images to summary.')
# Settings for training strategy.
flags.DEFINE_enum('learning_policy', 'poly', ['poly', 'step'],
'Learning rate policy for training.')
# Use 0.007 when training on PASCAL augmented training set, train_aug. When
# fine-tuning on PASCAL trainval set, use learning rate=0.0001.
flags.DEFINE_float('base_learning_rate', .0001,
'The base learning rate for model training.')
flags.DEFINE_float('learning_rate_decay_factor', 0.1,
'The rate to decay the base learning rate.')
flags.DEFINE_integer('learning_rate_decay_step', 2000,
'Decay the base learning rate at a fixed step.')
flags.DEFINE_float('learning_power', 0.9,
'The power value used in the poly learning policy.')
flags.DEFINE_integer('training_number_of_steps', 30000,
'The number of steps used for training')
flags.DEFINE_float('momentum', 0.9, 'The momentum value to use')
# When fine_tune_batch_norm=True, use at least batch size larger than 12
# (batch size more than 16 is better). Otherwise, one could use smaller batch
# size and set fine_tune_batch_norm=False.
flags.DEFINE_integer('train_batch_size', 8,
'The number of images in each batch during training.')
# For weight_decay, use 0.00004 for MobileNet-V2 or Xcpetion model variants.
# Use 0.0001 for ResNet model variants.
flags.DEFINE_float('weight_decay', 0.00004,
'The value of the weight decay for training.')
flags.DEFINE_multi_integer('train_crop_size', [513, 513],
'Image crop size [height, width] during training.')
flags.DEFINE_float('last_layer_gradient_multiplier', 1.0,
'The gradient multiplier for last layers, which is used to '
'boost the gradient of last layers if the value > 1.')
flags.DEFINE_boolean('upsample_logits', True,
'Upsample logits during training.')
# Settings for fine-tuning the network.
flags.DEFINE_string('tf_initial_checkpoint', None,
'The initial checkpoint in tensorflow format.')
# Set to False if one does not want to re-use the trained classifier weights.
flags.DEFINE_boolean('initialize_last_layer', False,
'Initialize the last layer.')
flags.DEFINE_boolean('last_layers_contain_logits_only', False,
'Only consider logits as last layers or not.')
flags.DEFINE_integer('slow_start_step', 0,
'Training model with small learning rate for few steps.')
flags.DEFINE_float('slow_start_learning_rate', 1e-4,
'Learning rate employed during slow start.')
# Set to True if one wants to fine-tune the batch norm parameters in DeepLabv3.
# Set to False and use small batch size to save GPU memory.
flags.DEFINE_boolean('fine_tune_batch_norm', True,
'Fine tune the batch norm parameters or not.')
flags.DEFINE_float('min_scale_factor', 0.5,
'Mininum scale factor for data augmentation.')
flags.DEFINE_float('max_scale_factor', 2.,
'Maximum scale factor for data augmentation.')
flags.DEFINE_float('scale_factor_step_size', 0.25,
'Scale factor step size for data augmentation.')
# For `xception_65`, use atrous_rates = [12, 24, 36] if output_stride = 8, or
# rates = [6, 12, 18] if output_stride = 16. For `mobilenet_v2`, use None. Note
# one could use different atrous_rates/output_stride during training/evaluation.
flags.DEFINE_multi_integer('atrous_rates', None,
'Atrous rates for atrous spatial pyramid pooling.')
flags.DEFINE_integer('output_stride', 16,
'The ratio of input to output spatial resolution.')
# Dataset settings.
flags.DEFINE_string('dataset', 'pascal_voc_seg',
'Name of the segmentation dataset.')
flags.DEFINE_string('train_split', 'train',
'Which split of the dataset to be used for training')
flags.DEFINE_string('dataset_dir', None, 'Where the dataset reside.')
def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
"""Builds a clone of DeepLab.
Args:
inputs_queue: A prefetch queue for images and labels.
outputs_to_num_classes: A map from output type to the number of classes.
For example, for the task of semantic segmentation with 21 semantic
classes, we would have outputs_to_num_classes['semantic'] = 21.
ignore_label: Ignore label.
Returns:
A map of maps from output_type (e.g., semantic prediction) to a
dictionary of multi-scale logits names to logits. For each output_type,
the dictionary has keys which correspond to the scales and values which
correspond to the logits. For example, if `scales` equals [1.0, 1.5],
then the keys would include 'merged_logits', 'logits_1.00' and
'logits_1.50'.
"""
samples = inputs_queue.dequeue()
# Add name to input and label nodes so we can add to summary.
samples[common.IMAGE] = tf.identity(
samples[common.IMAGE], name=common.IMAGE)
samples[common.LABEL] = tf.identity(
samples[common.LABEL], name=common.LABEL)
model_options = common.ModelOptions(
outputs_to_num_classes=outputs_to_num_classes,
crop_size=FLAGS.train_crop_size,
atrous_rates=FLAGS.atrous_rates,
output_stride=FLAGS.output_stride)
print('model options: ', model_options)
outputs_to_scales_to_logits = model.multi_scale_logits(
samples[common.IMAGE],
model_options=model_options,
image_pyramid=FLAGS.image_pyramid,
weight_decay=FLAGS.weight_decay,
is_training=True,
fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)
# Add name to graph node so we can add to summary.
output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
output_type_dict[model.MERGED_LOGITS_SCOPE],
name=common.OUTPUT_TYPE)
for output, num_classes in six.iteritems(outputs_to_num_classes):
train_utils.my_mixed_loss(
outputs_to_scales_to_logits[output],
samples[common.LABEL],
num_classes,
ignore_label,
loss_weight=1.0,
upsample_logits=FLAGS.upsample_logits,
scope=output)
return outputs_to_scales_to_logits
def main(unused_argv):
tf.logging.set_verbosity(tf.logging.INFO)
# Set up deployment (i.e., multi-GPUs and/or multi-replicas).
config = model_deploy.DeploymentConfig(
num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=FLAGS.task,
num_replicas=FLAGS.num_replicas,
num_ps_tasks=FLAGS.num_ps_tasks)
# Split the batch across GPUs.
assert FLAGS.train_batch_size % config.num_clones == 0, (
'Training batch size not divisble by number of clones (GPUs).')
clone_batch_size = FLAGS.train_batch_size // config.num_clones
# Get dataset-dependent information.
dataset = segmentation_dataset.get_dataset(
FLAGS.dataset, FLAGS.train_split, dataset_dir=FLAGS.dataset_dir)
tf.gfile.MakeDirs(FLAGS.train_logdir)
tf.logging.info('Training on %s set', FLAGS.train_split)
with tf.Graph().as_default() as graph:
with tf.device(config.inputs_device()):
samples = input_generator.get(
dataset,
FLAGS.train_crop_size,
clone_batch_size,
min_resize_value=FLAGS.min_resize_value,
max_resize_value=FLAGS.max_resize_value,
resize_factor=FLAGS.resize_factor,
min_scale_factor=FLAGS.min_scale_factor,
max_scale_factor=FLAGS.max_scale_factor,
scale_factor_step_size=FLAGS.scale_factor_step_size,
dataset_split=FLAGS.train_split,
is_training=True,
model_variant=FLAGS.model_variant)
inputs_queue = prefetch_queue.prefetch_queue(
samples, capacity=128 * config.num_clones)
# Create the global step on the device storing the variables.
with tf.device(config.variables_device()):
global_step = tf.train.get_or_create_global_step()
# Define the model and create clones.
model_fn = _build_deeplab
model_args = (inputs_queue, {
common.OUTPUT_TYPE: dataset.num_classes
}, dataset.ignore_label)
clones = model_deploy.create_clones(config, model_fn, args=model_args)
# Gather update_ops from the first clone. These contain, for example,
# the updates for the batch_norm variables created by model_fn.
first_clone_scope = config.clone_scope(0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
# Gather initial summaries.
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
# Add summaries for model variables.
for model_var in slim.get_model_variables():
summaries.add(tf.summary.histogram(model_var.op.name, model_var))
# Add summaries for images, labels, semantic predictions
if FLAGS.save_summaries_images:
summary_image = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.IMAGE)).strip('/'))
summaries.add(
tf.summary.image('samples/%s' % common.IMAGE, summary_image))
first_clone_label = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.LABEL)).strip('/'))
# Scale up summary image pixel values for better visualization.
pixel_scaling = max(1, 255 // dataset.num_classes)
summary_label = tf.cast(first_clone_label * pixel_scaling, tf.uint8)
summaries.add(
tf.summary.image('samples/%s' % common.LABEL, summary_label))
first_clone_output = graph.get_tensor_by_name(
('%s/%s:0' % (first_clone_scope, common.OUTPUT_TYPE)).strip('/'))
predictions = tf.expand_dims(tf.argmax(first_clone_output, 3), -1)
summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8)
summaries.add(
tf.summary.image(
'samples/%s' % common.OUTPUT_TYPE, summary_predictions))
# Add summaries for losses.
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
# Build the optimizer based on the device specification.
with tf.device(config.optimizer_device()):
learning_rate = train_utils.get_model_learning_rate(
FLAGS.learning_policy, FLAGS.base_learning_rate,
FLAGS.learning_rate_decay_step, FLAGS.learning_rate_decay_factor,
FLAGS.training_number_of_steps, FLAGS.learning_power,
FLAGS.slow_start_step, FLAGS.slow_start_learning_rate)
optimizer = tf.train.MomentumOptimizer(learning_rate, FLAGS.momentum)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
# add the shit to debugging breakpoint to print out
train_utils.addc(tf.identity(learning_rate, 'learning_rate'))
startup_delay_steps = FLAGS.task * FLAGS.startup_delay_steps
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
with tf.device(config.variables_device()):
total_loss, grads_and_vars = model_deploy.optimize_clones(
clones, optimizer)
total_loss = tf.check_numerics(total_loss, 'Loss is inf or nan.')
summaries.add(tf.summary.scalar('total_loss', total_loss))
# Modify the gradients for biases and last layer variables.
last_layers = model.get_extra_layer_scopes(
FLAGS.last_layers_contain_logits_only)
grad_mult = train_utils.get_model_gradient_multipliers(
last_layers, FLAGS.last_layer_gradient_multiplier)
if grad_mult:
grads_and_vars = slim.learning.multiply_gradients(
grads_and_vars, grad_mult)
# Create gradient update op.
grad_updates = optimizer.apply_gradients(
grads_and_vars, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
# print the number of parameters for the model
print(tf.trainable_variables())
print('[DEBUGGING]::the number of parameters: ', np.sum([np.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
with tf.control_dependencies([update_op]):
train_tensor = tf.identity(total_loss, name='train_op')
# add slim summaries to print out debugging breakpoints
for probe in tf.get_collection("debugging"):
slim.summaries.add_scalar_summary(probe, name=probe.op.name, print_summary=True)
# Add the summaries from the first clone. These contain the summaries
# created by model_fn and either optimize_clones() or _gather_clone_loss().
summaries |= set(
tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
# Merge all summaries together.
summary_op = tf.summary.merge(list(summaries))
# Soft placement allows placing on CPU ops without GPU implementation.
session_config = tf.ConfigProto(
allow_soft_placement=True, log_device_placement=False)
# Start the training.
slim.learning.train(
train_tensor,
logdir=FLAGS.train_logdir,
log_every_n_steps=FLAGS.log_steps,
master=FLAGS.master,
number_of_steps=FLAGS.training_number_of_steps,
is_chief=(FLAGS.task == 0),
session_config=session_config,
startup_delay_steps=startup_delay_steps,
init_fn=train_utils.get_model_init_fn(
FLAGS.train_logdir,
FLAGS.tf_initial_checkpoint,
FLAGS.initialize_last_layer,
last_layers,
ignore_missing_vars=True),
summary_op=summary_op,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs)
if __name__ == '__main__':
flags.mark_flag_as_required('train_logdir')
flags.mark_flag_as_required('tf_initial_checkpoint')
flags.mark_flag_as_required('dataset_dir')
tf.app.run() | 0.898254 | 0.383728 |
import random
def main():
Name= raw_input("Your name:")
phone= raw_input ("Your current phone:")
sp= smartphone()
cp= currentPhone()
p=output(Name,phone,cp,sp)
print p
def calculation(afford,use,superRandom,password):
return afford,use,superRandom,password
def smartphone():
afford= money()
use= rely()
superRandom= random1()
password= <PASSWORD>()
phoneModel = calculation(afford,use,superRandom,password)
if phoneModel <=3:
return "Nokia 3310"
if phoneModel <=5:
return " Iphone6s"
if phoneModel <= 8:
return " Samsung s7"
if phoneModel <= 10:
return " Samsung a7"
if phoneModel < 11:
return " Samsung Note5"
if phoneModel > 12:
return "Samsung Note4"
def rely():
rely= raw_input("""
What do typically rely on your smartphone?
a. playing games
b. calling people
c. for work
d. use for SNS(social network site)
(type a,b,c,d):
""")
if rely== "a":
return 1
elif rely== "b":
return 2
elif rely== "c":
return 3
elif rely== "d":
return 4
else:
random.randint(1,4)
def money():
money= raw_input("""
How much you can afford to spend on a smartphone?
a. 0.0 baht
b. no worried my parent can afford for me
c. money is not my problem
d. I rather got it for free
(type a,b,c,d):
""")
if money== "a" and "b" and "c" and "d":
return random.randint(1,3)
else:
random.randint(1,4)
def security():
security= raw_input("""
What do you preferred for phone security
a. fingerprint
b. passwords
c. none, simple slider
d. a pattern
(type a,b,c,d):
""")
if not security == "a" or security == "d":
return 0
elif security == "a" and "d":
return 3
else:
return random.random()
def random1():
random1= raw_input("""
How do you want to calculate your result?
a. use random to select your phone
b. I don't care
c. don't use random
d. Whatever
(type a,b,c,d):
""")
if random1== "a"or "b" or "c" or "d":
return random.random()/2
else:
return random.randint(1,4)
def currentPhone():
currentPhone=raw_input("""
Do you think your phone suit you?
(type y or n):
""")
if currentPhone == "y":
return True
else:
return False
def output(Name,phone,cp,sp):
out = """
Hello {},
You are currently using {}.
This program will help you to decide to buy a phone that best suits you.
I think my current phone suit me: {}
From the test that you took
The result is {}.
""". format(Name,phone,cp,sp)
return out
main() | conditionals.py | import random
def main():
Name= raw_input("Your name:")
phone= raw_input ("Your current phone:")
sp= smartphone()
cp= currentPhone()
p=output(Name,phone,cp,sp)
print p
def calculation(afford,use,superRandom,password):
return afford,use,superRandom,password
def smartphone():
afford= money()
use= rely()
superRandom= random1()
password= <PASSWORD>()
phoneModel = calculation(afford,use,superRandom,password)
if phoneModel <=3:
return "Nokia 3310"
if phoneModel <=5:
return " Iphone6s"
if phoneModel <= 8:
return " Samsung s7"
if phoneModel <= 10:
return " Samsung a7"
if phoneModel < 11:
return " Samsung Note5"
if phoneModel > 12:
return "Samsung Note4"
def rely():
rely= raw_input("""
What do typically rely on your smartphone?
a. playing games
b. calling people
c. for work
d. use for SNS(social network site)
(type a,b,c,d):
""")
if rely== "a":
return 1
elif rely== "b":
return 2
elif rely== "c":
return 3
elif rely== "d":
return 4
else:
random.randint(1,4)
def money():
money= raw_input("""
How much you can afford to spend on a smartphone?
a. 0.0 baht
b. no worried my parent can afford for me
c. money is not my problem
d. I rather got it for free
(type a,b,c,d):
""")
if money== "a" and "b" and "c" and "d":
return random.randint(1,3)
else:
random.randint(1,4)
def security():
security= raw_input("""
What do you preferred for phone security
a. fingerprint
b. passwords
c. none, simple slider
d. a pattern
(type a,b,c,d):
""")
if not security == "a" or security == "d":
return 0
elif security == "a" and "d":
return 3
else:
return random.random()
def random1():
random1= raw_input("""
How do you want to calculate your result?
a. use random to select your phone
b. I don't care
c. don't use random
d. Whatever
(type a,b,c,d):
""")
if random1== "a"or "b" or "c" or "d":
return random.random()/2
else:
return random.randint(1,4)
def currentPhone():
currentPhone=raw_input("""
Do you think your phone suit you?
(type y or n):
""")
if currentPhone == "y":
return True
else:
return False
def output(Name,phone,cp,sp):
out = """
Hello {},
You are currently using {}.
This program will help you to decide to buy a phone that best suits you.
I think my current phone suit me: {}
From the test that you took
The result is {}.
""". format(Name,phone,cp,sp)
return out
main() | 0.136551 | 0.121425 |
import tensorflow as tf
import tensorflow.contrib.slim as slim
import logging
from convlstm import ConvGRUCell, ConvLSTMCell
logger = logging.getLogger('mview3d.' + __name__)
def get_bias(shape, name='bias'):
return tf.get_variable(
name, shape=shape, initializer=tf.constant_initializer(0.0))
def get_weights(shape, name='weights'):
return tf.get_variable(
name, shape=shape, initializer=slim.initializers.xavier_initializer())
def convgru(grid, kernel=[3, 3, 3], filters=32):
bs, im_bs, h, w, d, ch = grid.get_shape().as_list()
conv_gru = ConvGRUCell(
shape=[h, w, d],
initializer=slim.initializers.xavier_initializer(),
kernel=kernel,
filters=filters)
seq_length = [im_bs for _ in range(bs)]
outputs, states = tf.nn.dynamic_rnn(
conv_gru,
grid,
parallel_iterations=64,
sequence_length=seq_length,
dtype=tf.float32,
time_major=False)
return outputs, states
def convlstm(grid, kernel=[3, 3, 3], filters=32):
bs, im_bs, h, w, d, ch = grid.get_shape().as_list()
conv_lstm = ConvLSTMCell(
shape=[h, w, d],
initializer=slim.initializers.xavier_initializer(),
kernel=kernel,
filters=filters)
seq_length = [im_bs for _ in range(bs)]
outputs, states = tf.nn.dynamic_rnn(
conv_lstm,
grid,
parallel_iterations=64,
sequence_length=seq_length,
dtype=tf.float32,
time_major=False)
return outputs, states
conv_rnns = {'gru': convgru, 'lstm': convlstm}
def instance_norm(x):
epsilon = 1e-5
x_shape = x.get_shape().as_list()
if len(x_shape) == 4:
axis = [1, 2]
elif len(x_shape) == 5:
axis = [1, 2, 3]
else:
logger.error(
'Instance norm not supported for tensor rank %d' % len(x_shape))
with tf.variable_scope('InstanceNorm'):
mean, var = tf.nn.moments(x, axis, keep_dims=True)
beta = get_bias([x_shape[-1]])
return tf.nn.batch_normalization(
x, mean, var, offset=beta, scale=None, variance_epsilon=epsilon)
def deconv3d(name,
X,
fsize,
ch,
stride=2,
norm=None,
padding="SAME",
activation=tf.nn.relu,
mode="TRAIN"):
bs, h, w, d, in_ch = tf_static_shape(X)
filt_shape = [fsize, fsize, fsize, ch, in_ch]
out_shape = [bs, h * stride, w * stride, d * stride, ch]
stride = [1, stride, stride, stride, 1]
with tf.variable_scope(name):
if activation is not None:
X = activation(X)
params = get_weights(filt_shape)
X = tf.nn.conv3d_transpose(X, params, out_shape, stride, padding)
if norm is None:
bias_dim = [filt_shape[-2]]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def conv3d(name,
X,
fsize,
ch,
stride=2,
norm=None,
padding="SAME",
activation=tf.nn.relu,
mode="TRAIN"):
bs, h, w, d, in_ch = tf_static_shape(X)
filt_shape = [fsize, fsize, fsize, in_ch, ch]
stride = [1, stride, stride, stride, 1]
with tf.variable_scope(name):
if activation is not None:
X = activation(X)
params = get_weights(filt_shape)
X = tf.nn.conv3d(X, params, stride, padding)
if norm is None:
bias_dim = [filt_shape[-1]]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def separable_conv2d(name,
X,
fsize,
ch_mult,
out_ch,
stride=2,
norm=None,
padding="SAME",
act=tf.nn.relu,
mode="TRAIN"):
bs, h, w, in_ch = tf_static_shape(X)
depth_filt_shape = [fsize, fsize, in_ch, ch_mult]
point_filt_shape = [1, 1, in_ch * ch_mult, out_ch]
stride = [1, stride, stride, 1]
with tf.variable_scope(name):
if act is not None:
X = act(X)
params_depth = get_weights(depth_filt_shape, name='depth_weights')
params_pt = get_weights(point_filt_shape, name='pt_weights')
X = tf.nn.depthwise_conv2d(X, params_depth, stride, padding)
X = tf.nn.conv2d(X, params_pt, stride, padding)
if norm is None:
bias_dim = [out_ch]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def conv2d(name,
X,
fsize,
ch,
stride=2,
norm=None,
padding="SAME",
act=tf.nn.relu,
mode="TRAIN"):
bs, h, w, in_ch = tf_static_shape(X)
filt_shape = [fsize, fsize, in_ch, ch]
stride = [1, stride, stride, 1]
with tf.variable_scope(name):
if act is not None:
X = act(X)
params = get_weights(filt_shape)
X = tf.nn.conv2d(X, params, stride, padding)
if norm is None:
bias_dim = [filt_shape[-1]]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def resize_conv2d(name,
X,
fsize,
ch,
stride=2,
norm=None,
padding="SAME",
act=tf.nn.relu,
mode="TRAIN"):
bs, h, w, in_ch = tf_static_shape(X)
filt_shape = [fsize, fsize, in_ch, ch]
new_h, new_w = h * stride, w * stride
conv_stride = [1, 1, 1, 1]
with tf.variable_scope(name):
if act is not None:
X = act(X)
X = tf.image.resize_nearest_neighbor(X, [new_h, new_w])
params = get_weights(filt_shape)
X = tf.nn.conv2d(X, params, conv_stride, padding)
if norm is None:
bias_dim = [ch]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def deconv2d(name,
X,
fsize,
ch,
stride=2,
norm=None,
padding="SAME",
act=tf.nn.relu,
mode="TRAIN"):
bs, h, w, in_ch = tf_static_shape(X)
filt_shape = [fsize, fsize, ch, in_ch]
out_shape = [bs, h * stride, w * stride, ch]
stride = [1, stride, stride, 1]
with tf.variable_scope(name):
if act is not None:
X = act(X)
params = get_weights(filt_shape)
X = tf.nn.conv2d_transpose(X, params, out_shape, stride, padding)
if norm is None:
bias_dim = [filt_shape[-2]]
X = tf.nn.bias_add(X, get_bias(bias_dim))
elif norm == 'BN':
is_training = (True if mode == "TRAIN" else False)
X = slim.batch_norm(
X, is_training=is_training, updates_collections=None)
elif norm == 'IN':
X = instance_norm(X)
else:
logger.error('Invalid normalization! Choose from {None, BN, IN}')
return X
def residual(x, channels=32, norm="IN", scope='res'):
with tf.variable_scope(scope):
res_x = conv2d(
scope + '_1', x, 3, channels, stride=1, norm=norm, act=None)
res_x = conv2d(scope + '_2', res_x, 3, channels, stride=1, norm=norm)
x_skip = conv2d(scope + '_s', x, 1, channels, stride=1, norm=norm)
return res_x + x_skip
def fully_connected(name, X, dim, activation=tf.nn.relu):
bs = X.get_shape().as_list()[0]
with tf.variable_scope(name):
if activation is not None:
X = activation(X)
X = tf.reshape(X, [bs, -1])
wshape = (X.get_shape().as_list()[-1], dim)
params = get_weights(wshape)
X = tf.matmul(X, params)
X = tf.nn.bias_add(X, get_bias(dim))
return X
def nearest3(grid, idx, clip=False):
with tf.variable_scope('NearestInterp'):
_, h, w, d, f = grid.get_shape().as_list()
x, y, z = idx[:, 1], idx[:, 2], idx[:, 3]
g_val = tf.gather_nd(grid, tf.cast(tf.round(idx), 'int32'))
if clip:
x_inv = tf.logical_or(x < 0, x > h - 1)
y_inv = tf.logical_or(y < 0, y > w - 1)
z_inv = tf.logical_or(z < 0, x > d - 1)
valid_idx = 1 - \
tf.to_float(tf.logical_or(tf.logical_or(x_inv, y_inv), z_inv))
g_val = g_val * valid_idx[tf.newaxis, ...]
return g_val
def proj_slice(net,
grid,
K,
R,
proj_size=224,
samples=64,
min_z=1.0,
max_z=3.0):
'''grid = nv grids, R = nv x nr rotation matrices, '''
''' R = (bs, im, 3, 4), K = (bs, im, 3, 3), grid = (bs, im, h, w, d, ch)'''
rsz_factor = float(proj_size) / net.im_h
K = K * rsz_factor
K_shape = tf_static_shape(K)
bs, im_bs, h, w, d, ch = tf_static_shape(grid)
print(bs, im_bs, h, w, d, ch)
npix = proj_size**2
with tf.variable_scope('ProjSlice'):
# Setup dimensions
with tf.name_scope('PixelCenters'):
# Setup image grids to unproject along rays
im_range = tf.range(0.5, proj_size, 1)
im_grid = tf.stack(tf.meshgrid(im_range, im_range))
rs_grid = tf.reshape(im_grid, [2, -1])
# Append rsz_factor to ensure that
rs_grid = tf.concat(
[rs_grid, tf.ones((1, npix)) * rsz_factor], axis=0)
rs_grid = tf.reshape(rs_grid, [1, 1, 3, npix])
rs_grid = tf.tile(rs_grid, [K_shape[0], K_shape[1], 1, 1])
with tf.name_scope('Im2Cam'):
# Compute Xc - points in camera frame
Xc = tf.matrix_triangular_solve(
K, rs_grid, lower=False, name='KinvX')
# Define z values of samples along ray
z_samples = tf.linspace(min_z, max_z, samples)
# Transform Xc to Xw using transpose of rotation matrix
Xc = repeat_tensor(Xc, samples, rep_dim=2)
Xc = Xc * z_samples[tf.newaxis, tf.newaxis, :, tf.newaxis,
tf.newaxis]
Xc = tf.concat(
[Xc, tf.ones([K_shape[0], K_shape[1], samples, 1, npix])],
axis=-2)
with tf.name_scope('Cam2World'):
# Construct [R^{T}|-R^{T}t]
Rt = tf.matrix_transpose(R[:, :, :, :3])
tr = tf.expand_dims(R[:, :, :, 3], axis=-1)
R_c2w = tf.concat([Rt, -tf.matmul(Rt, tr)], axis=3)
R_c2w = repeat_tensor(R_c2w, samples, rep_dim=2)
Xw = tf.matmul(R_c2w, Xc)
# Transform world points to grid locations to sample from
Xw = ((Xw - net.vmin) / (net.vmax - net.vmin)) * net.nvox
# bs, K_shape[1], samples, npix, 3
Xw = tf.transpose(Xw, [0, 1, 2, 4, 3])
Xw = repeat_tensor(Xw, im_bs, rep_dim=1)
with tf.name_scope('Interp'):
sample_grid = collapse_dims(grid)
print(sample_grid)
sample_locs = collapse_dims(Xw)
lshape = tf_static_shape(sample_locs)
vox_idx = tf.range(lshape[0])
vox_idx = repeat_tensor(vox_idx, lshape[1], rep_dim=1)
vox_idx = tf.reshape(vox_idx, [-1, 1])
vox_idx = repeat_tensor(vox_idx, samples * npix, rep_dim=1)
vox_idx = tf.reshape(vox_idx, [-1, 1])
sample_idx = tf.concat(
[tf.to_float(vox_idx),
tf.reshape(sample_locs, [-1, 3])],
axis=1)
g_val = nearest3(sample_grid, sample_idx)
g_val = tf.reshape(g_val, [
bs, im_bs, K_shape[1], samples, proj_size, proj_size, -1
])
ray_slices = tf.transpose(g_val, [0, 1, 2, 4, 5, 6, 3])
return ray_slices, z_samples
def proj_splat(net, feats, K, Rcam):
KRcam = tf.matmul(K, Rcam)
with tf.variable_scope('ProjSplat'):
nR, fh, fw, fdim = tf_static_shape(feats)
rsz_h = float(fh) / net.im_h
rsz_w = float(fw) / net.im_w
# Create voxel grid
with tf.name_scope('GridCenters'):
grid_range = tf.range(net.vmin + net.vsize / 2.0, net.vmax,
net.vsize)
net.grid = tf.stack(
tf.meshgrid(grid_range, grid_range, grid_range))
print("Grid range shape:", grid_range)
net.rs_grid = tf.reshape(net.grid, [3, -1])
nV = tf_static_shape(net.rs_grid)[1]
net.rs_grid = tf.concat([net.rs_grid, tf.ones([1, nV])], axis=0)
# Project grid
with tf.name_scope('World2Cam'):
im_p = tf.matmul(tf.reshape(KRcam, [-1, 4]), net.rs_grid)
im_x, im_y, im_z = im_p[::3, :], im_p[1::3, :], im_p[2::3, :]
im_x = (im_x / im_z) * rsz_w
im_y = (im_y / im_z) * rsz_h
net.im_p, net.im_x, net.im_y, net.im_z = im_p, im_x, im_y, im_z
# Bilinear interpolation
with tf.name_scope('BilinearInterp'):
im_x = tf.clip_by_value(im_x, 0, fw - 1)
im_y = tf.clip_by_value(im_y, 0, fh - 1)
im_x0 = tf.cast(tf.floor(im_x), 'int32')
im_x1 = im_x0 + 1
im_y0 = tf.cast(tf.floor(im_y), 'int32')
im_y1 = im_y0 + 1
im_x0_f, im_x1_f = tf.to_float(im_x0), tf.to_float(im_x1)
im_y0_f, im_y1_f = tf.to_float(im_y0), tf.to_float(im_y1)
ind_grid = tf.range(0, nR)
ind_grid = tf.expand_dims(ind_grid, 1)
im_ind = tf.tile(ind_grid, [1, nV])
def _get_gather_inds(x, y):
return tf.reshape(tf.stack([im_ind, y, x], axis=2), [-1, 3])
# Gather values
Ia = tf.gather_nd(feats, _get_gather_inds(im_x0, im_y0))
Ib = tf.gather_nd(feats, _get_gather_inds(im_x0, im_y1))
Ic = tf.gather_nd(feats, _get_gather_inds(im_x1, im_y0))
Id = tf.gather_nd(feats, _get_gather_inds(im_x1, im_y1))
# Calculate bilinear weights
wa = (im_x1_f - im_x) * (im_y1_f - im_y)
wb = (im_x1_f - im_x) * (im_y - im_y0_f)
wc = (im_x - im_x0_f) * (im_y1_f - im_y)
wd = (im_x - im_x0_f) * (im_y - im_y0_f)
wa, wb = tf.reshape(wa, [-1, 1]), tf.reshape(wb, [-1, 1])
wc, wd = tf.reshape(wc, [-1, 1]), tf.reshape(wd, [-1, 1])
net.wa, net.wb, net.wc, net.wd = wa, wb, wc, wd
net.Ia, net.Ib, net.Ic, net.Id = Ia, Ib, Ic, Id
Ibilin = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
with tf.name_scope('AppendDepth'):
# Concatenate depth value along ray to feature
Ibilin = tf.concat(
[Ibilin, tf.reshape(im_z, [nV * nR, 1])], axis=1)
fdim = Ibilin.get_shape().as_list()[-1]
net.Ibilin = tf.reshape(Ibilin, [
net.batch_size, net.im_batch, net.nvox, net.nvox, net.nvox,
fdim
])
net.Ibilin = tf.transpose(net.Ibilin, [0, 1, 3, 2, 4, 5])
return net.Ibilin
def loss_l1(pred, gt):
return tf.losses.absolute_difference(gt, pred, scope='loss_l1')
def loss_ce(pred, gt_vox):
with tf.variable_scope('loss_ce'):
pred = tf.expand_dims(tf.reshape(pred, [-1]), axis=1)
gt_vox = tf.expand_dims(tf.reshape(gt_vox, [-1]), axis=1)
return tf.losses.sigmoid_cross_entropy(gt_vox, pred)
def concat_pool(feats):
batch_size = feats.get_shape().as_list()[0]
nvox = feats.get_shape().as_list()[2]
with tf.variable_scope('concat_pool'):
feats = tf.transpose(feats, [0, 5, 1, 2, 3, 4])
feats = tf.reshape(feats, [batch_size, -1, nvox, nvox, nvox])
feats = tf.transpose(feats, [0, 2, 3, 4, 1])
return feats
def form_image_grid(input_tensor, grid_shape, image_shape, num_channels):
    """Arrange a minibatch of images into a grid to form a single image.
    Args:
        input_tensor: Tensor. Minibatch of images to format, either 4D
            ([batch size, height, width, num_channels]) or flattened
            ([batch size, height * width * num_channels]).
        grid_shape: Sequence of int. The shape of the image grid,
            formatted as [grid_height, grid_width].
        image_shape: Sequence of int. The shape of a single image,
            formatted as [image_height, image_width].
        num_channels: int. The number of channels in an image.
    Returns:
        Tensor representing a single image in which the input images have been
        arranged into a grid.
    Raises:
        ValueError: The grid shape and minibatch size don't match, or the image
            shape and number of channels are incompatible with the input tensor.
    """
    if grid_shape[0] * grid_shape[1] != int(input_tensor.get_shape()[0]):
        raise ValueError('Grid shape incompatible with minibatch size.')
    if len(input_tensor.get_shape()) == 2:
        num_features = image_shape[0] * image_shape[1] * num_channels
        if int(input_tensor.get_shape()[1]) != num_features:
            raise ValueError(
                'Image shape and number of channels incompatible with '
                'input tensor.')
    elif len(input_tensor.get_shape()) == 4:
        if (int(input_tensor.get_shape()[1]) != image_shape[0] or
                int(input_tensor.get_shape()[2]) != image_shape[1] or
                int(input_tensor.get_shape()[3]) != num_channels):
            # NOTE(review): these two literals concatenate without a space,
            # producing "...withinput tensor." in the message.
            raise ValueError(
                'Image shape and number of channels incompatible with'
                'input tensor.')
    else:
        raise ValueError('Unrecognized input tensor format.')
    # Tile images into the grid: expose the grid axes via reshape, then
    # interleave grid rows/cols with image rows/cols via transposes.
    height, width = grid_shape[0] * \
        image_shape[0], grid_shape[1] * image_shape[1]
    input_tensor = tf.reshape(input_tensor,
                              grid_shape + image_shape + [num_channels])
    input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4])
    input_tensor = tf.reshape(
        input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
    input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3])
    input_tensor = tf.reshape(input_tensor, [1, height, width, num_channels])
    return input_tensor
def voxel_views(in_vox, gh, gw, tsdf=False, pad=4, scope='voxel_views'):
    """Render axis-aligned 2D views of a (batch, h, w, d, ch) voxel grid.

    For occupancy grids (tsdf=False) each view is a max-projection along
    one spatial axis; for TSDF grids the central slice is visualized.
    Returns [x_view, y_view, z_view] as uint8 image-grid tensors tiled
    gh x gw via form_image_grid.
    """

    def vis_tsdf(tv):
        # Map small |tsdf| (near the surface) to bright values.
        return 1.0 - tf.abs(tv) * 5.0

    with tf.variable_scope(scope):
        _, h, w, d, ch = in_vox.get_shape().as_list()
        if not tsdf:
            x_view = tf.reduce_max(in_vox, axis=1)
            y_view = tf.reduce_max(in_vox, axis=2)
            z_view = tf.reduce_max(in_vox, axis=3)
        else:
            # Bug fix: use floor division so the slice index stays an int
            # under Python 3 (`h / 2` is a float there, which is not a
            # valid tensor index).
            x_view = vis_tsdf(in_vox[:, h // 2, :, :, :])
            y_view = vis_tsdf(in_vox[:, :, w // 2, :, :])
            z_view = vis_tsdf(in_vox[:, :, :, d // 2, :])
        pad = [[0, 0], [pad, pad], [pad, pad], [0, 0]]
        x_view = tf.pad(x_view, pad)
        y_view = tf.pad(y_view, pad)
        z_view = tf.pad(z_view, pad)
        image_shape = x_view.get_shape().as_list()[1:3]
        grid_shape = [gh, gw]
        color_ch = 1
        x_view = form_image_grid(x_view, grid_shape, image_shape, color_ch)
        y_view = form_image_grid(y_view, grid_shape, image_shape, color_ch)
        z_view = form_image_grid(z_view, grid_shape, image_shape, color_ch)
        views = [
            tf.cast(x_view * 255, tf.uint8),
            tf.cast(y_view * 255, tf.uint8),
            tf.cast(z_view * 255, tf.uint8)
        ]
        return views
def im_views(ims, gh, gw, scope='im_views'):
    """Tile a (batch, n, h, w, ch) image batch into one uint8 grid image."""
    with tf.variable_scope(scope):
        _, _, h, w, ch = tf_static_shape(ims)
        im_grid = form_image_grid(collapse_dims(ims), [gh, gw], [h, w], ch)
        # Inputs are assumed to be in [0, 1] — TODO confirm against callers.
        return tf.cast(im_grid * 255, tf.uint8)
def voxel_sum(net, tsdf=False):
    """Build merged image summaries comparing predicted and ground-truth
    voxel grids along the x/y/z view axes."""
    vox_sum = []
    pred_views = voxel_views(
        collapse_dims(net.pred_vox),
        net.batch_size,
        net.im_batch,
        tsdf,
        scope='vox_pred')
    gt_views = voxel_views(net.gt_vox, net.batch_size, 1, tsdf, scope='vox_gt')
    # Concatenate prediction and ground truth side by side (width axis of
    # the (1, h, w, c) grid images).
    caxis = 2
    with tf.name_scope('voxel_vis'):
        x_view = tf.concat([pred_views[0], gt_views[0]], axis=caxis)
        vox_sum.append(tf.summary.image('x_view', x_view))
        y_view = tf.concat([pred_views[1], gt_views[1]], axis=caxis)
        vox_sum.append(tf.summary.image('y_view', y_view))
        z_view = tf.concat([pred_views[2], gt_views[2]], axis=caxis)
        vox_sum.append(tf.summary.image('z_view', z_view))
    return tf.summary.merge(vox_sum)
def image_sum(im_tensor, nh, nw, tag='views'):
    """Image summary showing `im_tensor` tiled into an nh x nw grid."""
    return tf.summary.image(tag + '_sum', im_views(im_tensor, nh, nw, tag))
def vis_depth(d, min_d=1, max_d=3, sc=10):
    """Convert a depth map to an RGBA visualization.

    Gray value is the depth scaled by max_d / sc; alpha is 1 for depths
    strictly inside (min_d, max_d) and 0 outside.
    """
    with tf.name_scope('vis_depth'):
        d_alpha = tf.to_float(tf.logical_and(d < max_d, d > min_d))
        d_v = d / sc * max_d
        d_v = tf.concat([d_v, d_v, d_v, d_alpha], axis=-1)
        return d_v
def depth_sum(depth_tensor, nh, nw, tag='depth_views'):
    """Image summary of depth maps rendered via vis_depth, tiled nh x nw."""
    return tf.summary.image(tag + '_sum',
                            im_views(vis_depth(depth_tensor), nh, nw, tag))
def repeat_tensor(T, nrep, rep_dim=1):
    """Insert a new axis at `rep_dim` and tile T `nrep` times along it."""
    repT = tf.expand_dims(T, rep_dim)
    tile_dim = [1] * len(tf_static_shape(repT))
    tile_dim[rep_dim] = nrep
    repT = tf.tile(repT, tile_dim)
    return repT
def collapse_dims(T):
    """Merge the first two axes of T into a single leading axis."""
    shape = tf_static_shape(T)
    return tf.reshape(T, [-1] + shape[2:])
def uncollapse_dims(T, s1, s2):
    """Split T's leading axis back into two axes of sizes (s1, s2)."""
    shape = tf_static_shape(T)
    return tf.reshape(T, [s1, s2] + shape[1:])
def tf_static_shape(T):
return T.get_shape().as_list() | ops.py | import tensorflow as tf
import tensorflow.contrib.slim as slim
import logging
from convlstm import ConvGRUCell, ConvLSTMCell
logger = logging.getLogger('mview3d.' + __name__)
def get_bias(shape, name='bias'):
    """Create (or fetch from the current scope) a zero-initialized bias."""
    init = tf.constant_initializer(0.0)
    return tf.get_variable(name, shape=shape, initializer=init)
def get_weights(shape, name='weights'):
    """Create (or fetch) a Xavier-initialized weight variable of `shape`."""
    init = slim.initializers.xavier_initializer()
    return tf.get_variable(name, shape=shape, initializer=init)
def convgru(grid, kernel=[3, 3, 3], filters=32):
    """Fuse a (batch, n_views, h, w, d, ch) grid sequence with a 3D ConvGRU.

    Returns the (outputs, states) pair from tf.nn.dynamic_rnn, scanning
    over the view axis.
    """
    batch, n_views, height, width, depth, _ = grid.get_shape().as_list()
    cell = ConvGRUCell(
        shape=[height, width, depth],
        initializer=slim.initializers.xavier_initializer(),
        kernel=kernel,
        filters=filters)
    # Every sequence in the batch has the full number of views.
    lengths = [n_views] * batch
    return tf.nn.dynamic_rnn(
        cell,
        grid,
        parallel_iterations=64,
        sequence_length=lengths,
        dtype=tf.float32,
        time_major=False)
def convlstm(grid, kernel=[3, 3, 3], filters=32):
    """Fuse a (batch, n_views, h, w, d, ch) grid sequence with a 3D ConvLSTM.

    Returns the (outputs, states) pair from tf.nn.dynamic_rnn, scanning
    over the view axis.
    """
    batch, n_views, height, width, depth, _ = grid.get_shape().as_list()
    cell = ConvLSTMCell(
        shape=[height, width, depth],
        initializer=slim.initializers.xavier_initializer(),
        kernel=kernel,
        filters=filters)
    # Every sequence in the batch has the full number of views.
    lengths = [n_views] * batch
    return tf.nn.dynamic_rnn(
        cell,
        grid,
        parallel_iterations=64,
        sequence_length=lengths,
        dtype=tf.float32,
        time_major=False)
conv_rnns = {'gru': convgru, 'lstm': convlstm}
def instance_norm(x):
    """Instance normalization for rank-4 (NHWC) or rank-5 (NDHWC) tensors.

    Normalizes each example over its spatial axes and adds a learned
    per-channel offset (no learned scale).

    Raises:
        ValueError: if x is neither rank 4 nor rank 5.
    """
    epsilon = 1e-5
    x_shape = x.get_shape().as_list()
    if len(x_shape) == 4:
        axis = [1, 2]
    elif len(x_shape) == 5:
        axis = [1, 2, 3]
    else:
        # Bug fix: previously this branch only logged and then crashed
        # with an UnboundLocalError on `axis`; fail loudly instead.
        msg = 'Instance norm not supported for tensor rank %d' % len(x_shape)
        logger.error(msg)
        raise ValueError(msg)
    with tf.variable_scope('InstanceNorm'):
        mean, var = tf.nn.moments(x, axis, keep_dims=True)
        beta = get_bias([x_shape[-1]])
        return tf.nn.batch_normalization(
            x, mean, var, offset=beta, scale=None, variance_epsilon=epsilon)
def deconv3d(name,
             X,
             fsize,
             ch,
             stride=2,
             norm=None,
             padding="SAME",
             activation=tf.nn.relu,
             mode="TRAIN"):
    """3D transposed convolution with pre-activation.

    Args:
        name: variable scope for the layer parameters.
        X: input tensor, (batch, h, w, d, in_ch); static shape must be known.
        fsize: cubic filter size.
        ch: number of output channels.
        stride: upsampling factor for each spatial axis.
        norm: None (bias only), 'BN' (batch norm) or 'IN' (instance norm).
        padding: conv padding mode.
        activation: applied to the *input* before the conv; None to skip.
        mode: 'TRAIN' enables batch-norm training behavior.

    Returns:
        Tensor of shape (batch, h*stride, w*stride, d*stride, ch).
    """
    bs, h, w, d, in_ch = tf_static_shape(X)
    # conv3d_transpose filters are (f, f, f, out_ch, in_ch).
    filt_shape = [fsize, fsize, fsize, ch, in_ch]
    out_shape = [bs, h * stride, w * stride, d * stride, ch]
    stride = [1, stride, stride, stride, 1]
    with tf.variable_scope(name):
        if activation is not None:
            X = activation(X)
        params = get_weights(filt_shape)
        X = tf.nn.conv3d_transpose(X, params, out_shape, stride, padding)
        if norm is None:
            # Output-channel count sits at index -2 of the filter shape.
            bias_dim = [filt_shape[-2]]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): an invalid `norm` only logs; X passes through
            # un-normalized rather than raising.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def conv3d(name,
           X,
           fsize,
           ch,
           stride=2,
           norm=None,
           padding="SAME",
           activation=tf.nn.relu,
           mode="TRAIN"):
    """3D convolution with pre-activation.

    Applies `activation` to the input, then a strided fsize^3 conv to `ch`
    output channels, then bias / batch-norm / instance-norm according to
    `norm` (None, 'BN' or 'IN'). `mode` controls batch-norm training.
    """
    bs, h, w, d, in_ch = tf_static_shape(X)
    filt_shape = [fsize, fsize, fsize, in_ch, ch]
    stride = [1, stride, stride, stride, 1]
    with tf.variable_scope(name):
        if activation is not None:
            X = activation(X)
        params = get_weights(filt_shape)
        X = tf.nn.conv3d(X, params, stride, padding)
        if norm is None:
            bias_dim = [filt_shape[-1]]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): invalid `norm` only logs; X passes through.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def separable_conv2d(name,
                     X,
                     fsize,
                     ch_mult,
                     out_ch,
                     stride=2,
                     norm=None,
                     padding="SAME",
                     act=tf.nn.relu,
                     mode="TRAIN"):
    """Depthwise-separable 2D convolution with pre-activation.

    A depthwise fsize^2 conv (channel multiplier `ch_mult`) followed by a
    1x1 pointwise conv to `out_ch` channels, then bias / batch-norm /
    instance-norm according to `norm`.

    Bug fix: the spatial stride is applied only in the depthwise stage.
    Previously the same strides were also passed to the 1x1 pointwise
    conv, downsampling by stride**2 overall instead of stride.
    """
    bs, h, w, in_ch = tf_static_shape(X)
    depth_filt_shape = [fsize, fsize, in_ch, ch_mult]
    point_filt_shape = [1, 1, in_ch * ch_mult, out_ch]
    stride = [1, stride, stride, 1]
    with tf.variable_scope(name):
        if act is not None:
            X = act(X)
        params_depth = get_weights(depth_filt_shape, name='depth_weights')
        params_pt = get_weights(point_filt_shape, name='pt_weights')
        X = tf.nn.depthwise_conv2d(X, params_depth, stride, padding)
        # The pointwise 1x1 conv only mixes channels; no further striding.
        X = tf.nn.conv2d(X, params_pt, [1, 1, 1, 1], padding)
        if norm is None:
            bias_dim = [out_ch]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): invalid `norm` only logs; X passes through.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def conv2d(name,
           X,
           fsize,
           ch,
           stride=2,
           norm=None,
           padding="SAME",
           act=tf.nn.relu,
           mode="TRAIN"):
    """2D convolution with pre-activation.

    Applies `act` to the input, then a strided fsize^2 conv to `ch` output
    channels, then bias / batch-norm / instance-norm according to `norm`
    (None, 'BN' or 'IN'). `mode` controls batch-norm training.
    """
    bs, h, w, in_ch = tf_static_shape(X)
    filt_shape = [fsize, fsize, in_ch, ch]
    stride = [1, stride, stride, 1]
    with tf.variable_scope(name):
        if act is not None:
            X = act(X)
        params = get_weights(filt_shape)
        X = tf.nn.conv2d(X, params, stride, padding)
        if norm is None:
            bias_dim = [filt_shape[-1]]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): invalid `norm` only logs; X passes through.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def resize_conv2d(name,
                  X,
                  fsize,
                  ch,
                  stride=2,
                  norm=None,
                  padding="SAME",
                  act=tf.nn.relu,
                  mode="TRAIN"):
    """Upsampling layer: nearest-neighbor resize followed by a stride-1 conv.

    Spatial size grows by `stride`; output has `ch` channels. Pre-activation
    and normalization behave as in conv2d.
    """
    bs, h, w, in_ch = tf_static_shape(X)
    filt_shape = [fsize, fsize, in_ch, ch]
    new_h, new_w = h * stride, w * stride
    # The conv itself is stride 1; all upsampling comes from the resize.
    conv_stride = [1, 1, 1, 1]
    with tf.variable_scope(name):
        if act is not None:
            X = act(X)
        X = tf.image.resize_nearest_neighbor(X, [new_h, new_w])
        params = get_weights(filt_shape)
        X = tf.nn.conv2d(X, params, conv_stride, padding)
        if norm is None:
            bias_dim = [ch]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): invalid `norm` only logs; X passes through.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def deconv2d(name,
             X,
             fsize,
             ch,
             stride=2,
             norm=None,
             padding="SAME",
             act=tf.nn.relu,
             mode="TRAIN"):
    """2D transposed convolution with pre-activation.

    Upsamples each spatial axis by `stride` to `ch` output channels, then
    applies bias / batch-norm / instance-norm according to `norm`.
    """
    bs, h, w, in_ch = tf_static_shape(X)
    # conv2d_transpose filters are (f, f, out_ch, in_ch).
    filt_shape = [fsize, fsize, ch, in_ch]
    out_shape = [bs, h * stride, w * stride, ch]
    stride = [1, stride, stride, 1]
    with tf.variable_scope(name):
        if act is not None:
            X = act(X)
        params = get_weights(filt_shape)
        X = tf.nn.conv2d_transpose(X, params, out_shape, stride, padding)
        if norm is None:
            # Output-channel count sits at index -2 of the filter shape.
            bias_dim = [filt_shape[-2]]
            X = tf.nn.bias_add(X, get_bias(bias_dim))
        elif norm == 'BN':
            is_training = (True if mode == "TRAIN" else False)
            X = slim.batch_norm(
                X, is_training=is_training, updates_collections=None)
        elif norm == 'IN':
            X = instance_norm(X)
        else:
            # NOTE(review): invalid `norm` only logs; X passes through.
            logger.error('Invalid normalization! Choose from {None, BN, IN}')
        return X
def residual(x, channels=32, norm="IN", scope='res'):
    """Residual unit: two 3x3 convs plus a 1x1-projected skip connection."""
    with tf.variable_scope(scope):
        branch = conv2d(
            scope + '_1', x, 3, channels, stride=1, norm=norm, act=None)
        branch = conv2d(scope + '_2', branch, 3, channels, stride=1, norm=norm)
        skip = conv2d(scope + '_s', x, 1, channels, stride=1, norm=norm)
        return branch + skip
def fully_connected(name, X, dim, activation=tf.nn.relu):
    """Flatten X (keeping the batch axis) and apply a dense layer of `dim`
    units. `activation` is applied to the input *before* the matmul,
    matching the pre-activation convention of the conv wrappers."""
    batch = X.get_shape().as_list()[0]
    with tf.variable_scope(name):
        if activation is not None:
            X = activation(X)
        flat = tf.reshape(X, [batch, -1])
        in_dim = flat.get_shape().as_list()[-1]
        weights = get_weights((in_dim, dim))
        out = tf.matmul(flat, weights)
        return tf.nn.bias_add(out, get_bias(dim))
def nearest3(grid, idx, clip=False):
    """Nearest-neighbor lookup of `grid` values at fractional indices.

    Args:
        grid: (n, h, w, d, f) feature volume.
        idx: (N, 4) float indices as (grid_index, x, y, z); rounded to the
            nearest voxel before gathering.
        clip: if True, zero out samples whose (x, y, z) fall outside the
            volume bounds.

    Returns:
        (N, f) gathered feature values.
    """
    with tf.variable_scope('NearestInterp'):
        _, h, w, d, f = grid.get_shape().as_list()
        x, y, z = idx[:, 1], idx[:, 2], idx[:, 3]
        g_val = tf.gather_nd(grid, tf.cast(tf.round(idx), 'int32'))
        if clip:
            x_inv = tf.logical_or(x < 0, x > h - 1)
            y_inv = tf.logical_or(y < 0, y > w - 1)
            # Bug fix: the z bound previously tested `x > d - 1`.
            z_inv = tf.logical_or(z < 0, z > d - 1)
            valid_idx = 1 - \
                tf.to_float(tf.logical_or(tf.logical_or(x_inv, y_inv), z_inv))
            # Bug fix: broadcast the (N,) validity mask across the feature
            # axis of the (N, f) values; the original `[tf.newaxis, ...]`
            # produced shape (1, N), which does not broadcast with (N, f).
            g_val = g_val * valid_idx[..., tf.newaxis]
        return g_val
def proj_slice(net,
               grid,
               K,
               R,
               proj_size=224,
               samples=64,
               min_z=1.0,
               max_z=3.0):
    '''grid = nv grids, R = nv x nr rotation matrices, '''
    ''' R = (bs, im, 3, 4), K = (bs, im, 3, 3), grid = (bs, im, h, w, d, ch)'''
    # Resample the feature grids along camera rays: for every output pixel,
    # `samples` depths in [min_z, max_z] are unprojected to world space,
    # mapped into grid coordinates and nearest-neighbor interpolated.
    # Returns (ray_slices, z_samples). Leftover debug `print`s removed.
    rsz_factor = float(proj_size) / net.im_h
    K = K * rsz_factor
    K_shape = tf_static_shape(K)
    bs, im_bs, h, w, d, ch = tf_static_shape(grid)
    npix = proj_size**2
    with tf.variable_scope('ProjSlice'):
        # Setup dimensions
        with tf.name_scope('PixelCenters'):
            # Setup image grids to unproject along rays
            im_range = tf.range(0.5, proj_size, 1)
            im_grid = tf.stack(tf.meshgrid(im_range, im_range))
            rs_grid = tf.reshape(im_grid, [2, -1])
            # Append rsz_factor to ensure that
            rs_grid = tf.concat(
                [rs_grid, tf.ones((1, npix)) * rsz_factor], axis=0)
            rs_grid = tf.reshape(rs_grid, [1, 1, 3, npix])
            rs_grid = tf.tile(rs_grid, [K_shape[0], K_shape[1], 1, 1])
        with tf.name_scope('Im2Cam'):
            # Compute Xc - points in camera frame
            Xc = tf.matrix_triangular_solve(
                K, rs_grid, lower=False, name='KinvX')
            # Define z values of samples along ray
            z_samples = tf.linspace(min_z, max_z, samples)
            # Transform Xc to Xw using transpose of rotation matrix
            Xc = repeat_tensor(Xc, samples, rep_dim=2)
            Xc = Xc * z_samples[tf.newaxis, tf.newaxis, :, tf.newaxis,
                                tf.newaxis]
            Xc = tf.concat(
                [Xc, tf.ones([K_shape[0], K_shape[1], samples, 1, npix])],
                axis=-2)
        with tf.name_scope('Cam2World'):
            # Construct [R^{T}|-R^{T}t]
            Rt = tf.matrix_transpose(R[:, :, :, :3])
            tr = tf.expand_dims(R[:, :, :, 3], axis=-1)
            R_c2w = tf.concat([Rt, -tf.matmul(Rt, tr)], axis=3)
            R_c2w = repeat_tensor(R_c2w, samples, rep_dim=2)
            Xw = tf.matmul(R_c2w, Xc)
            # Transform world points to grid locations to sample from
            Xw = ((Xw - net.vmin) / (net.vmax - net.vmin)) * net.nvox
            # bs, K_shape[1], samples, npix, 3
            Xw = tf.transpose(Xw, [0, 1, 2, 4, 3])
            Xw = repeat_tensor(Xw, im_bs, rep_dim=1)
        with tf.name_scope('Interp'):
            sample_grid = collapse_dims(grid)
            sample_locs = collapse_dims(Xw)
            lshape = tf_static_shape(sample_locs)
            # Prefix every sample location with the index of the grid it
            # belongs to, forming (grid_index, x, y, z) rows for nearest3.
            vox_idx = tf.range(lshape[0])
            vox_idx = repeat_tensor(vox_idx, lshape[1], rep_dim=1)
            vox_idx = tf.reshape(vox_idx, [-1, 1])
            vox_idx = repeat_tensor(vox_idx, samples * npix, rep_dim=1)
            vox_idx = tf.reshape(vox_idx, [-1, 1])
            sample_idx = tf.concat(
                [tf.to_float(vox_idx),
                 tf.reshape(sample_locs, [-1, 3])],
                axis=1)
            g_val = nearest3(sample_grid, sample_idx)
            g_val = tf.reshape(g_val, [
                bs, im_bs, K_shape[1], samples, proj_size, proj_size, -1
            ])
            ray_slices = tf.transpose(g_val, [0, 1, 2, 4, 5, 6, 3])
            return ray_slices, z_samples
def proj_splat(net, feats, K, Rcam):
    """Unproject per-view 2D feature maps into the voxel grid.

    Every voxel center is projected into each view with K @ Rcam and the
    view's feature map is bilinearly sampled at that pixel; the sample's
    projective depth is appended as an extra feature channel.

    Args:
        net: model object providing im_h/im_w, vmin/vmax/vsize, nvox,
            batch_size and im_batch; intermediate tensors are stashed on it.
        feats: (n_views, fh, fw, fdim) feature maps, views collapsed.
        K, Rcam: per-view intrinsics and extrinsics.

    Returns:
        (batch_size, im_batch, nvox, nvox, nvox, fdim + 1) splatted grid,
        also stored as net.Ibilin. (Leftover debug `print` removed.)
    """
    KRcam = tf.matmul(K, Rcam)
    with tf.variable_scope('ProjSplat'):
        nR, fh, fw, fdim = tf_static_shape(feats)
        rsz_h = float(fh) / net.im_h
        rsz_w = float(fw) / net.im_w
        # Create voxel grid of cell-center coordinates.
        with tf.name_scope('GridCenters'):
            grid_range = tf.range(net.vmin + net.vsize / 2.0, net.vmax,
                                  net.vsize)
            net.grid = tf.stack(
                tf.meshgrid(grid_range, grid_range, grid_range))
            net.rs_grid = tf.reshape(net.grid, [3, -1])
            nV = tf_static_shape(net.rs_grid)[1]
            net.rs_grid = tf.concat([net.rs_grid, tf.ones([1, nV])], axis=0)
        # Project grid centers into every view (homogeneous -> pixel coords).
        with tf.name_scope('World2Cam'):
            im_p = tf.matmul(tf.reshape(KRcam, [-1, 4]), net.rs_grid)
            im_x, im_y, im_z = im_p[::3, :], im_p[1::3, :], im_p[2::3, :]
            im_x = (im_x / im_z) * rsz_w
            im_y = (im_y / im_z) * rsz_h
            net.im_p, net.im_x, net.im_y, net.im_z = im_p, im_x, im_y, im_z
        # Bilinear interpolation
        with tf.name_scope('BilinearInterp'):
            im_x = tf.clip_by_value(im_x, 0, fw - 1)
            im_y = tf.clip_by_value(im_y, 0, fh - 1)
            im_x0 = tf.cast(tf.floor(im_x), 'int32')
            im_x1 = im_x0 + 1
            im_y0 = tf.cast(tf.floor(im_y), 'int32')
            im_y1 = im_y0 + 1
            im_x0_f, im_x1_f = tf.to_float(im_x0), tf.to_float(im_x1)
            im_y0_f, im_y1_f = tf.to_float(im_y0), tf.to_float(im_y1)
            ind_grid = tf.range(0, nR)
            ind_grid = tf.expand_dims(ind_grid, 1)
            im_ind = tf.tile(ind_grid, [1, nV])

            def _get_gather_inds(x, y):
                # Build (view, row, col) triples for tf.gather_nd.
                return tf.reshape(tf.stack([im_ind, y, x], axis=2), [-1, 3])

            # Gather values
            Ia = tf.gather_nd(feats, _get_gather_inds(im_x0, im_y0))
            Ib = tf.gather_nd(feats, _get_gather_inds(im_x0, im_y1))
            Ic = tf.gather_nd(feats, _get_gather_inds(im_x1, im_y0))
            Id = tf.gather_nd(feats, _get_gather_inds(im_x1, im_y1))
            # Calculate bilinear weights
            wa = (im_x1_f - im_x) * (im_y1_f - im_y)
            wb = (im_x1_f - im_x) * (im_y - im_y0_f)
            wc = (im_x - im_x0_f) * (im_y1_f - im_y)
            wd = (im_x - im_x0_f) * (im_y - im_y0_f)
            wa, wb = tf.reshape(wa, [-1, 1]), tf.reshape(wb, [-1, 1])
            wc, wd = tf.reshape(wc, [-1, 1]), tf.reshape(wd, [-1, 1])
            net.wa, net.wb, net.wc, net.wd = wa, wb, wc, wd
            net.Ia, net.Ib, net.Ic, net.Id = Ia, Ib, Ic, Id
            Ibilin = tf.add_n([wa * Ia, wb * Ib, wc * Ic, wd * Id])
        with tf.name_scope('AppendDepth'):
            # Concatenate depth value along ray to feature
            Ibilin = tf.concat(
                [Ibilin, tf.reshape(im_z, [nV * nR, 1])], axis=1)
            fdim = Ibilin.get_shape().as_list()[-1]
            net.Ibilin = tf.reshape(Ibilin, [
                net.batch_size, net.im_batch, net.nvox, net.nvox, net.nvox,
                fdim
            ])
            net.Ibilin = tf.transpose(net.Ibilin, [0, 1, 3, 2, 4, 5])
        return net.Ibilin
def loss_l1(pred, gt):
    """Mean absolute (L1) error between `pred` and ground truth `gt`."""
    loss = tf.losses.absolute_difference(gt, pred, scope='loss_l1')
    return loss
def loss_ce(pred, gt_vox):
    """Per-voxel sigmoid cross-entropy between flattened logits and labels."""
    with tf.variable_scope('loss_ce'):
        flat_pred = tf.reshape(pred, [-1])[:, tf.newaxis]
        flat_gt = tf.reshape(gt_vox, [-1])[:, tf.newaxis]
        return tf.losses.sigmoid_cross_entropy(flat_gt, flat_pred)
def concat_pool(feats):
    """Collapse the per-view and channel axes of a rank-6 feature grid into a
    single channel axis, returning (batch, nvox, nvox, nvox, views*ch)."""
    static_shape = feats.get_shape().as_list()
    bs, nvox = static_shape[0], static_shape[2]
    with tf.variable_scope('concat_pool'):
        # Bring channels next to the view axis, merge the two, then restore
        # a channels-last layout.
        pooled = tf.transpose(feats, [0, 5, 1, 2, 3, 4])
        pooled = tf.reshape(pooled, [bs, -1, nvox, nvox, nvox])
        return tf.transpose(pooled, [0, 2, 3, 4, 1])
def form_image_grid(input_tensor, grid_shape, image_shape, num_channels):
    """Arrange a minibatch of images into a grid to form a single image.
    Args:
        input_tensor: Tensor. Minibatch of images to format, either 4D
            ([batch size, height, width, num_channels]) or flattened
            ([batch size, height * width * num_channels]).
        grid_shape: Sequence of int. The shape of the image grid,
            formatted as [grid_height, grid_width].
        image_shape: Sequence of int. The shape of a single image,
            formatted as [image_height, image_width].
        num_channels: int. The number of channels in an image.
    Returns:
        Tensor representing a single image in which the input images have been
        arranged into a grid.
    Raises:
        ValueError: The grid shape and minibatch size don't match, or the image
            shape and number of channels are incompatible with the input tensor.
    """
    if grid_shape[0] * grid_shape[1] != int(input_tensor.get_shape()[0]):
        raise ValueError('Grid shape incompatible with minibatch size.')
    if len(input_tensor.get_shape()) == 2:
        num_features = image_shape[0] * image_shape[1] * num_channels
        if int(input_tensor.get_shape()[1]) != num_features:
            raise ValueError(
                'Image shape and number of channels incompatible with '
                'input tensor.')
    elif len(input_tensor.get_shape()) == 4:
        if (int(input_tensor.get_shape()[1]) != image_shape[0] or
                int(input_tensor.get_shape()[2]) != image_shape[1] or
                int(input_tensor.get_shape()[3]) != num_channels):
            # Bug fix: the two literals previously concatenated without a
            # space, yielding "...withinput tensor." in the message.
            raise ValueError(
                'Image shape and number of channels incompatible with '
                'input tensor.')
    else:
        raise ValueError('Unrecognized input tensor format.')
    # Tile images into the grid: expose the grid axes via reshape, then
    # interleave grid rows/cols with image rows/cols via transposes.
    height, width = grid_shape[0] * \
        image_shape[0], grid_shape[1] * image_shape[1]
    input_tensor = tf.reshape(input_tensor,
                              grid_shape + image_shape + [num_channels])
    input_tensor = tf.transpose(input_tensor, [0, 1, 3, 2, 4])
    input_tensor = tf.reshape(
        input_tensor, [grid_shape[0], width, image_shape[0], num_channels])
    input_tensor = tf.transpose(input_tensor, [0, 2, 1, 3])
    input_tensor = tf.reshape(input_tensor, [1, height, width, num_channels])
    return input_tensor
def voxel_views(in_vox, gh, gw, tsdf=False, pad=4, scope='voxel_views'):
    """Render axis-aligned 2D views of a (batch, h, w, d, ch) voxel grid.

    For occupancy grids (tsdf=False) each view is a max-projection along
    one spatial axis; for TSDF grids the central slice is visualized.
    Returns [x_view, y_view, z_view] as uint8 image grids tiled gh x gw.
    """

    def vis_tsdf(tv):
        # Map small |tsdf| (near the surface) to bright values.
        return 1.0 - tf.abs(tv) * 5.0

    with tf.variable_scope(scope):
        _, h, w, d, ch = in_vox.get_shape().as_list()
        if not tsdf:
            x_view = tf.reduce_max(in_vox, axis=1)
            y_view = tf.reduce_max(in_vox, axis=2)
            z_view = tf.reduce_max(in_vox, axis=3)
        else:
            # Bug fix: floor division keeps the slice index an int under
            # Python 3 (`h / 2` is a float there, not a valid index).
            x_view = vis_tsdf(in_vox[:, h // 2, :, :, :])
            y_view = vis_tsdf(in_vox[:, :, w // 2, :, :])
            z_view = vis_tsdf(in_vox[:, :, :, d // 2, :])
        pad = [[0, 0], [pad, pad], [pad, pad], [0, 0]]
        x_view = tf.pad(x_view, pad)
        y_view = tf.pad(y_view, pad)
        z_view = tf.pad(z_view, pad)
        image_shape = x_view.get_shape().as_list()[1:3]
        grid_shape = [gh, gw]
        color_ch = 1
        x_view = form_image_grid(x_view, grid_shape, image_shape, color_ch)
        y_view = form_image_grid(y_view, grid_shape, image_shape, color_ch)
        z_view = form_image_grid(z_view, grid_shape, image_shape, color_ch)
        views = [
            tf.cast(x_view * 255, tf.uint8),
            tf.cast(y_view * 255, tf.uint8),
            tf.cast(z_view * 255, tf.uint8)
        ]
        return views
def im_views(ims, gh, gw, scope='im_views'):
    """Tile a (batch, n, h, w, ch) image batch into one uint8 grid image."""
    with tf.variable_scope(scope):
        shape = tf_static_shape(ims)
        h, w, ch = shape[2], shape[3], shape[4]
        grid = form_image_grid(collapse_dims(ims), [gh, gw], [h, w], ch)
        return tf.cast(grid * 255, tf.uint8)
def voxel_sum(net, tsdf=False):
    """Build merged image summaries comparing predicted and ground-truth
    voxel grids along the x/y/z view axes."""
    vox_sum = []
    pred_views = voxel_views(
        collapse_dims(net.pred_vox),
        net.batch_size,
        net.im_batch,
        tsdf,
        scope='vox_pred')
    gt_views = voxel_views(net.gt_vox, net.batch_size, 1, tsdf, scope='vox_gt')
    # Concatenate prediction and ground truth side by side (width axis of
    # the (1, h, w, c) grid images).
    caxis = 2
    with tf.name_scope('voxel_vis'):
        x_view = tf.concat([pred_views[0], gt_views[0]], axis=caxis)
        vox_sum.append(tf.summary.image('x_view', x_view))
        y_view = tf.concat([pred_views[1], gt_views[1]], axis=caxis)
        vox_sum.append(tf.summary.image('y_view', y_view))
        z_view = tf.concat([pred_views[2], gt_views[2]], axis=caxis)
        vox_sum.append(tf.summary.image('z_view', z_view))
    return tf.summary.merge(vox_sum)
def image_sum(im_tensor, nh, nw, tag='views'):
    """Image summary showing `im_tensor` tiled into an nh x nw grid."""
    grid = im_views(im_tensor, nh, nw, tag)
    return tf.summary.image(tag + '_sum', grid)
def vis_depth(d, min_d=1, max_d=3, sc=10):
    """Convert a depth map to an RGBA visualization.

    Gray value is the depth scaled by max_d / sc; alpha is 1 for depths
    strictly inside (min_d, max_d) and 0 outside.
    """
    with tf.name_scope('vis_depth'):
        d_alpha = tf.to_float(tf.logical_and(d < max_d, d > min_d))
        d_v = d / sc * max_d
        d_v = tf.concat([d_v, d_v, d_v, d_alpha], axis=-1)
        return d_v
def depth_sum(depth_tensor, nh, nw, tag='depth_views'):
    """Image summary of depth maps rendered via vis_depth, tiled nh x nw."""
    rendered = im_views(vis_depth(depth_tensor), nh, nw, tag)
    return tf.summary.image(tag + '_sum', rendered)
def repeat_tensor(T, nrep, rep_dim=1):
    """Insert a new axis at `rep_dim` and tile T `nrep` times along it."""
    expanded = tf.expand_dims(T, rep_dim)
    multiples = [1] * len(tf_static_shape(expanded))
    multiples[rep_dim] = nrep
    return tf.tile(expanded, multiples)
def collapse_dims(T):
    """Merge the first two axes of T into a single leading axis."""
    return tf.reshape(T, [-1] + tf_static_shape(T)[2:])
def uncollapse_dims(T, s1, s2):
    """Split T's leading axis back into two axes of sizes (s1, s2)."""
    return tf.reshape(T, [s1, s2] + tf_static_shape(T)[1:])
def tf_static_shape(T):
return T.get_shape().as_list() | 0.837188 | 0.482185 |
from __future__ import print_function
import logging
import os
import sys
import math
import random
import re
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentCreator(apimetrics.APImetricsCLI):
    """CLI tool that reconciles APImetrics deployments for each API call
    against a desired set of locations and call frequency."""

    # Overriding APImetricsCLI to add our command-line specific commands
    def get_argument_parser(self):
        """Extend the base argument parser with deployment-specific options."""
        parser = super(DeploymentCreator, self).get_argument_parser()
        parser.add_argument('location_ids', metavar='LOC', nargs='+', help="Location ID to deploy to")
        parser.add_argument('--frequency', '-f', type=int, help="Frequency to make API call (minutes)")
        parser.add_argument('--interactive', '-i', help="Interactive mode, ask for each API call", action="store_true")
        parser.add_argument('--name', '-n', help="Only APIs which match this name")
        parser.add_argument('--no-delete', '-d', help="Don't delete existing deployments", action="store_true")
        return parser

    def ask_user_about_call(self, call):
        """Return True if `call` passes the --name filter and, in interactive
        mode, is confirmed by the user."""
        if self.args.get('name'):
            api_name = call['meta']['name']
            if not re.search(self.args.get('name'), api_name):
                return False
        if self.args.get('interactive'):
            inp_str = input('Change deployments for API call "{name}"? y/N: '.format(**call.get('meta')))
            return inp_str.lower() == 'y'
        return True

    def run(self, **kwargs):
        """Create/update/delete deployments so each selected API call runs at
        the requested locations with staggered run delays."""
        list_of_calls = self.api.list_all_calls(**kwargs)
        locations = list(self.args['location_ids'])
        for call in list_of_calls['results']:
            if self.ask_user_about_call(call):
                deployments = self.api.list_deployments_by_call(call=call['id'], **kwargs)
                # Index existing deployments by location, dropping duplicates.
                existing_map = {}
                for deployment in deployments['results']:
                    dep_info = deployment.get('deployment')
                    if dep_info['location_id'] not in existing_map:
                        existing_map[dep_info['location_id']] = deployment
                    else:
                        print('Deleting duplicate deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
                        self.api.delete_deployment(deployment['id'], **kwargs)
                        print('OK')
                # Spread out API calls, avoid exactly on the hour
                # NOTE(review): argparse stores None when --frequency is
                # omitted, so this .get() default never fires and
                # `frequency * 60` would raise TypeError — confirm.
                frequency = self.args.get('frequency', 10)
                gap = math.ceil(float(frequency * 60) / (1.0 + len(self.args['location_ids'])))
                random.shuffle(locations)
                for i, location_id in enumerate(locations):
                    if location_id not in existing_map:
                        deployment = {
                            'deployment': {
                                'target_id': call['id'],
                                'location_id': location_id,
                                'frequency': frequency,
                                'run_delay': int((i + 1) * gap),
                            }
                        }
                        print('New deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
                        self.api.create_deployment(deployment, **kwargs)
                        print('OK')
                    else:
                        old_deploy = existing_map[location_id]
                        old_dep_info = old_deploy['deployment']
                        if old_dep_info['frequency'] != frequency:
                            obj_id = old_deploy['id']
                            deployment = {
                                'deployment': {
                                    'target_id': call['id'],
                                    'location_id': location_id,
                                    'frequency': frequency,
                                    'run_delay': int((i + 1) * gap),
                                }
                            }
                            print('Update deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
                            self.api.update_deployment(obj_id, deployment, **kwargs)
                            print('OK')
                        else:
                            print('Existing deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **old_deploy['deployment']), end='\t\t')
                            print('OK')
                        # Anything still in existing_map afterwards is
                        # no longer wanted.
                        del existing_map[location_id]
                if not self.args.get('no_delete', False):
                    for deployment in existing_map.values():
                        print('Deleting old deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
                        self.api.delete_deployment(deployment['id'], **kwargs)
                        print('OK')
                else:
                    print('Keeping old deployments')
def main():
    """Script entry point: run the deployment tool, reporting API errors."""
    creator = DeploymentCreator()
    try:
        creator.run()
    except apimetrics.APImetricsError as err:
        print("ERROR: {}".format(err), file=sys.stderr)
if __name__ == '__main__':
main() | apimetrics/scripts/deploy_all_apis.py | from __future__ import print_function
import logging
import os
import sys
import math
import random
import re
from six.moves import input
import apimetrics
logging.basicConfig(
stream=sys.stdout,
format='%(asctime)s:%(levelname)s: %(message)s',
level=os.environ.get('DEBUG_LEVEL') or logging.INFO)
log = logging.getLogger(__name__) # pylint: disable=C0103
class DeploymentCreator(apimetrics.APImetricsCLI):
    """CLI tool that reconciles APImetrics deployments with a desired set
    of locations and call frequency.

    For every API call (optionally filtered by --name / confirmed
    interactively) it deletes duplicate deployments, creates or updates a
    deployment per requested location with staggered run delays, and —
    unless --no-delete is given — removes deployments at locations that
    are no longer requested.
    """

    # Overriding APImetricsCLI to add our command-line specific commands
    def get_argument_parser(self):
        """Extend the base argument parser with deployment-specific options."""
        parser = super(DeploymentCreator, self).get_argument_parser()
        parser.add_argument('location_ids', metavar='LOC', nargs='+', help="Location ID to deploy to")
        parser.add_argument('--frequency', '-f', type=int, help="Frequency to make API call (minutes)")
        parser.add_argument('--interactive', '-i', help="Interactive mode, ask for each API call", action="store_true")
        parser.add_argument('--name', '-n', help="Only APIs which match this name")
        parser.add_argument('--no-delete', '-d', help="Don't delete existing deployments", action="store_true")
        return parser

    def ask_user_about_call(self, call):
        """Return True if `call` passes the --name filter and, in interactive
        mode, is confirmed by the user."""
        if self.args.get('name'):
            api_name = call['meta']['name']
            if not re.search(self.args.get('name'), api_name):
                return False
        if self.args.get('interactive'):
            inp_str = input('Change deployments for API call "{name}"? y/N: '.format(**call.get('meta')))
            return inp_str.lower() == 'y'
        return True

    def run(self, **kwargs):
        """Reconcile deployments for every selected API call."""
        list_of_calls = self.api.list_all_calls(**kwargs)
        locations = list(self.args['location_ids'])
        # Spread out API calls, avoid exactly on the hour.
        # Bug fix: argparse stores None when --frequency is omitted, so the
        # old `.get('frequency', 10)` default never fired and
        # `frequency * 60` raised TypeError; fall back to 10 explicitly.
        # Both values are loop-invariant, so compute them once.
        frequency = self.args.get('frequency') or 10
        gap = math.ceil(float(frequency * 60) / (1.0 + len(self.args['location_ids'])))
        for call in list_of_calls['results']:
            if self.ask_user_about_call(call):
                deployments = self.api.list_deployments_by_call(call=call['id'], **kwargs)
                # Index existing deployments by location, dropping duplicates.
                existing_map = {}
                for deployment in deployments['results']:
                    dep_info = deployment.get('deployment')
                    if dep_info['location_id'] not in existing_map:
                        existing_map[dep_info['location_id']] = deployment
                    else:
                        print('Deleting duplicate deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
                        self.api.delete_deployment(deployment['id'], **kwargs)
                        print('OK')
                random.shuffle(locations)
                for i, location_id in enumerate(locations):
                    if location_id not in existing_map:
                        deployment = {
                            'deployment': {
                                'target_id': call['id'],
                                'location_id': location_id,
                                'frequency': frequency,
                                'run_delay': int((i + 1) * gap),
                            }
                        }
                        print('New deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
                        self.api.create_deployment(deployment, **kwargs)
                        print('OK')
                    else:
                        old_deploy = existing_map[location_id]
                        old_dep_info = old_deploy['deployment']
                        if old_dep_info['frequency'] != frequency:
                            obj_id = old_deploy['id']
                            deployment = {
                                'deployment': {
                                    'target_id': call['id'],
                                    'location_id': location_id,
                                    'frequency': frequency,
                                    'run_delay': int((i + 1) * gap),
                                }
                            }
                            print('Update deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **deployment['deployment']), end='\t\t')
                            self.api.update_deployment(obj_id, deployment, **kwargs)
                            print('OK')
                        else:
                            print('Existing deployment {location_id} for api {name}, freq {frequency}, delay {run_delay}s...'.format(name=call['meta']['name'], **old_deploy['deployment']), end='\t\t')
                            print('OK')
                        # Anything still in existing_map afterwards is
                        # no longer wanted.
                        del existing_map[location_id]
                if not self.args.get('no_delete', False):
                    for deployment in existing_map.values():
                        print('Deleting old deployment {location_id} for api {name}...'.format(name=call['meta']['name'], **deployment.get('deployment')), end='\t\t')
                        self.api.delete_deployment(deployment['id'], **kwargs)
                        print('OK')
                else:
                    print('Keeping old deployments')
def main():
    """Script entry point: run the deployment tool, reporting API errors."""
    tool = DeploymentCreator()
    try:
        tool.run()
    except apimetrics.APImetricsError as error:
        print("ERROR: {}".format(error), file=sys.stderr)
if __name__ == '__main__':
main() | 0.34632 | 0.054676 |
from spack import *
import re
import os
import glob
class Zoltan(Package):
    """The Zoltan library is a toolkit of parallel combinatorial algorithms
    for parallel, unstructured, and/or adaptive scientific
    applications. Zoltan's largest component is a suite of dynamic
    load-balancing and partitioning algorithms that increase
    applications' parallel performance by reducing idle time. Zoltan
    also has graph coloring and graph ordering algorithms, which are
    useful in task schedulers and parallel preconditioners.
    """

    homepage = "http://www.cs.sandia.gov/zoltan"
    url = "http://www.cs.sandia.gov/~kddevin/Zoltan_Distributions/zoltan_distrib_v3.83.tar.gz"

    # Known releases and their md5 checksums.
    version('3.83', '1ff1bc93f91e12f2c533ddb01f2c095f')
    version('3.8', '9d8fba8a990896881b85351d4327c4a9')
    version('3.6', '9cce794f7241ecd8dbea36c3d7a880f9')
    version('3.3', '5eb8f00bda634b25ceefa0122bd18d65')

    # v3.8 has Makefile bugs that break parallel builds (see make() call below).
    patch('notparallel.patch', when='@3.8')

    variant('debug', default=False, description='Builds a debug version of the library.')
    variant('shared', default=True, description='Builds a shared version of the library.')
    variant('fortran', default=True, description='Enable Fortran support.')
    variant('mpi', default=True, description='Enable MPI support.')
    variant('parmetis', default=False, description='Enable ParMETIS support.')

    depends_on('mpi', when='+mpi')
    depends_on('parmetis@4:', when='+parmetis')
    depends_on('metis', when='+parmetis')

    # ParMETIS is an MPI library, so it cannot be enabled without MPI.
    conflicts('+parmetis', when='~mpi')

    def install(self, spec, prefix):
        """Configure, build and install Zoltan.

        Builds out-of-source in a ``build`` subdirectory and, for +shared
        builds, renames the produced ``.a`` files to the platform shared
        library suffix as a post-processing step.
        """
        # FIXME: The older Zoltan versions fail to compile the F90 MPI wrappers
        # because of some complicated generic type problem.
        if spec.satisfies('@:3.6+fortran+mpi'):
            raise RuntimeError(('Cannot build Zoltan v{0} with +fortran and '
                                '+mpi; please disable one of these features '
                                'or upgrade versions.').format(self.version))

        config_args = [
            self.get_config_flag('f90interface', 'fortran'),
            self.get_config_flag('mpi', 'mpi'),
        ]
        # Optimization/debug flags shared by the C, C++ and Fortran compilers.
        config_cflags = [
            '-O0' if '+debug' in spec else '-O3',
            '-g' if '+debug' in spec else '',
        ]

        if '+shared' in spec:
            # Zoltan's build only produces static archives; substitute the
            # archiver with a linker invocation to get shared objects instead.
            config_args.append('RANLIB=echo')
            config_args.append('--with-ar=$(CXX) -shared $(LDFLAGS) -o')
            config_cflags.append(self.compiler.pic_flag)
            if spec.satisfies('%gcc'):
                config_args.append('--with-libs=-lgfortran')
            if spec.satisfies('%intel'):
                config_args.append('--with-libs=-lifcore')

        if '+parmetis' in spec:
            config_args.append('--with-parmetis')
            config_args.append('--with-parmetis-libdir={0}'
                               .format(spec['parmetis'].prefix.lib))
            config_args.append('--with-parmetis-incdir={0}'
                               .format(spec['parmetis'].prefix.include))
            config_args.append('--with-incdirs=-I{0}'
                               .format(spec['metis'].prefix.include))
            config_args.append('--with-ldflags=-L{0}'
                               .format(spec['metis'].prefix.lib))
            # Match Zoltan's global ID width to the METIS index width.
            if '+int64' in spec['metis']:
                config_args.append('--with-id-type=ulong')
            else:
                config_args.append('--with-id-type=uint')

        if '+mpi' in spec:
            config_args.append('CC={0}'.format(spec['mpi'].mpicc))
            config_args.append('CXX={0}'.format(spec['mpi'].mpicxx))
            config_args.append('FC={0}'.format(spec['mpi'].mpifc))
            config_args.append('--with-mpi={0}'.format(spec['mpi'].prefix))
            # NOTE: Zoltan assumes that it's linking against an MPI library
            # that can be found with '-lmpi' which isn't the case for many
            # MPI packages. We rely on the MPI-wrappers to automatically add
            # what is required for linking and thus pass an empty list of libs
            config_args.append('--with-mpi-libs= ')

        # NOTE: Early versions of Zoltan come packaged with a few embedded
        # library packages (e.g. ParMETIS, Scotch), which messes with Spack's
        # ability to descend directly into the package's source directory.
        source_directory = self.stage.source_path
        if spec.satisfies('@:3.6'):
            zoltan_directory = 'Zoltan_v{0}'.format(self.version)
            source_directory = join_path(source_directory, zoltan_directory)

        build_directory = join_path(source_directory, 'build')
        with working_dir(build_directory, create=True):
            config = Executable(join_path(source_directory, 'configure'))
            config(
                '--prefix={0}'.format(prefix),
                '--with-cflags={0}'.format(' '.join(config_cflags)),
                '--with-cxxflags={0}'.format(' '.join(config_cflags)),
                '--with-fcflags={0}'.format(' '.join(config_cflags)),
                *config_args
            )

            # NOTE: Earlier versions of Zoltan cannot be built in parallel
            # because they contain nested Makefile dependency bugs.
            make(parallel=not spec.satisfies('@:3.6+fortran'))
            make('install')

        # NOTE: Unfortunately, Zoltan doesn't provide any configuration
        # options for the extension of the output library files, so this
        # script must change these extensions as a post-processing step.
        if '+shared' in spec:
            for lib_path in glob.glob(join_path(prefix, 'lib', '*.a')):
                lib_static_name = os.path.basename(lib_path)
                lib_shared_name = re.sub(r'\.a$', '.{0}'.format(dso_suffix),
                                         lib_static_name)
                move(lib_path, join_path(prefix, 'lib', lib_shared_name))

    def get_config_flag(self, flag_name, flag_variant):
        # Translate a variant into an --enable-X / --disable-X configure flag.
        flag_pre = 'en' if '+{0}'.format(flag_variant) in self.spec else 'dis'
        return '--{0}able-{1}'.format(flag_pre, flag_name)
from spack import *
import re
import os
import glob
class Zoltan(Package):
    """The Zoltan library is a toolkit of parallel combinatorial algorithms
    for parallel, unstructured, and/or adaptive scientific
    applications. Zoltan's largest component is a suite of dynamic
    load-balancing and partitioning algorithms that increase
    applications' parallel performance by reducing idle time. Zoltan
    also has graph coloring and graph ordering algorithms, which are
    useful in task schedulers and parallel preconditioners.
    """

    homepage = "http://www.cs.sandia.gov/zoltan"
    url = "http://www.cs.sandia.gov/~kddevin/Zoltan_Distributions/zoltan_distrib_v3.83.tar.gz"

    # Known releases and their md5 checksums.
    version('3.83', '1ff1bc93f91e12f2c533ddb01f2c095f')
    version('3.8', '9d8fba8a990896881b85351d4327c4a9')
    version('3.6', '9cce794f7241ecd8dbea36c3d7a880f9')
    version('3.3', '5eb8f00bda634b25ceefa0122bd18d65')

    # v3.8 has Makefile bugs that break parallel builds (see make() call below).
    patch('notparallel.patch', when='@3.8')

    variant('debug', default=False, description='Builds a debug version of the library.')
    variant('shared', default=True, description='Builds a shared version of the library.')
    variant('fortran', default=True, description='Enable Fortran support.')
    variant('mpi', default=True, description='Enable MPI support.')
    variant('parmetis', default=False, description='Enable ParMETIS support.')

    depends_on('mpi', when='+mpi')
    depends_on('parmetis@4:', when='+parmetis')
    depends_on('metis', when='+parmetis')

    # ParMETIS is an MPI library, so it cannot be enabled without MPI.
    conflicts('+parmetis', when='~mpi')

    def install(self, spec, prefix):
        """Configure, build and install Zoltan.

        Builds out-of-source in a ``build`` subdirectory and, for +shared
        builds, renames the produced ``.a`` files to the platform shared
        library suffix as a post-processing step.
        """
        # FIXME: The older Zoltan versions fail to compile the F90 MPI wrappers
        # because of some complicated generic type problem.
        if spec.satisfies('@:3.6+fortran+mpi'):
            raise RuntimeError(('Cannot build Zoltan v{0} with +fortran and '
                                '+mpi; please disable one of these features '
                                'or upgrade versions.').format(self.version))

        config_args = [
            self.get_config_flag('f90interface', 'fortran'),
            self.get_config_flag('mpi', 'mpi'),
        ]
        # Optimization/debug flags shared by the C, C++ and Fortran compilers.
        config_cflags = [
            '-O0' if '+debug' in spec else '-O3',
            '-g' if '+debug' in spec else '',
        ]

        if '+shared' in spec:
            # Zoltan's build only produces static archives; substitute the
            # archiver with a linker invocation to get shared objects instead.
            config_args.append('RANLIB=echo')
            config_args.append('--with-ar=$(CXX) -shared $(LDFLAGS) -o')
            config_cflags.append(self.compiler.pic_flag)
            if spec.satisfies('%gcc'):
                config_args.append('--with-libs=-lgfortran')
            if spec.satisfies('%intel'):
                config_args.append('--with-libs=-lifcore')

        if '+parmetis' in spec:
            config_args.append('--with-parmetis')
            config_args.append('--with-parmetis-libdir={0}'
                               .format(spec['parmetis'].prefix.lib))
            config_args.append('--with-parmetis-incdir={0}'
                               .format(spec['parmetis'].prefix.include))
            config_args.append('--with-incdirs=-I{0}'
                               .format(spec['metis'].prefix.include))
            config_args.append('--with-ldflags=-L{0}'
                               .format(spec['metis'].prefix.lib))
            # Match Zoltan's global ID width to the METIS index width.
            if '+int64' in spec['metis']:
                config_args.append('--with-id-type=ulong')
            else:
                config_args.append('--with-id-type=uint')

        if '+mpi' in spec:
            config_args.append('CC={0}'.format(spec['mpi'].mpicc))
            config_args.append('CXX={0}'.format(spec['mpi'].mpicxx))
            config_args.append('FC={0}'.format(spec['mpi'].mpifc))
            config_args.append('--with-mpi={0}'.format(spec['mpi'].prefix))
            # NOTE: Zoltan assumes that it's linking against an MPI library
            # that can be found with '-lmpi' which isn't the case for many
            # MPI packages. We rely on the MPI-wrappers to automatically add
            # what is required for linking and thus pass an empty list of libs
            config_args.append('--with-mpi-libs= ')

        # NOTE: Early versions of Zoltan come packaged with a few embedded
        # library packages (e.g. ParMETIS, Scotch), which messes with Spack's
        # ability to descend directly into the package's source directory.
        source_directory = self.stage.source_path
        if spec.satisfies('@:3.6'):
            zoltan_directory = 'Zoltan_v{0}'.format(self.version)
            source_directory = join_path(source_directory, zoltan_directory)

        build_directory = join_path(source_directory, 'build')
        with working_dir(build_directory, create=True):
            config = Executable(join_path(source_directory, 'configure'))
            config(
                '--prefix={0}'.format(prefix),
                '--with-cflags={0}'.format(' '.join(config_cflags)),
                '--with-cxxflags={0}'.format(' '.join(config_cflags)),
                '--with-fcflags={0}'.format(' '.join(config_cflags)),
                *config_args
            )

            # NOTE: Earlier versions of Zoltan cannot be built in parallel
            # because they contain nested Makefile dependency bugs.
            make(parallel=not spec.satisfies('@:3.6+fortran'))
            make('install')

        # NOTE: Unfortunately, Zoltan doesn't provide any configuration
        # options for the extension of the output library files, so this
        # script must change these extensions as a post-processing step.
        if '+shared' in spec:
            for lib_path in glob.glob(join_path(prefix, 'lib', '*.a')):
                lib_static_name = os.path.basename(lib_path)
                lib_shared_name = re.sub(r'\.a$', '.{0}'.format(dso_suffix),
                                         lib_static_name)
                move(lib_path, join_path(prefix, 'lib', lib_shared_name))

    def get_config_flag(self, flag_name, flag_variant):
        # Translate a variant into an --enable-X / --disable-X configure flag.
        flag_pre = 'en' if '+{0}'.format(flag_variant) in self.spec else 'dis'
        return '--{0}able-{1}'.format(flag_pre, flag_name)
import pytest
from niacin.text.en import word
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man has a brown dog", 0.0, "The man has a brown dog"),
        ("The man has a brown dog", 1.0, "man has brown dog"),
    ],
)
def test_remove_articles(string, p, exp):
    """Exercise remove_articles at the probability extremes (0.0 and 1.0)."""
    assert word.remove_articles(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("politician persuades dramatic rhythms", 0.0, "politician persuades dramatic rhythms"),
        ("politician persuades dramatic rhythms", 1.0, "politican pursuades dramtic rythyms"),
    ],
)
def test_add_misspellings(string, p, exp):
    """Exercise add_misspelling at the probability extremes against fixed outputs."""
    assert word.add_misspelling(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It has a feud", 0.0, "It ha a feud"),
        ("It has a feud", 1.0, "It ha a vendetta"),
    ],
)
def test_add_hyponyms(string, p, exp):
    """Exercise add_hyponyms at the probability extremes against fixed outputs."""
    assert word.add_hyponyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It has a sore", 0.0, "It ha a sore"),
        ("It has a sore", 1.0, "It ha a infection"),
    ],
)
def test_add_hypernyms(string, p, exp):
    """Exercise add_hypernyms at the probability extremes against fixed outputs."""
    assert word.add_hypernyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It is computable", 0.0, "It is computable"),
        ("It is computable", 1.0, "It is estimable"),
    ],
)
def test_add_synonyms(string, p, exp):
    """Exercise add_synonyms at the probability extremes against fixed outputs."""
    assert word.add_synonyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("dog", 0.0, "dog"),
        ("dog", 1.0, "(((dog)))"),
    ],
)
def test_add_parens(string, p, exp):
    """Exercise add_parens at the probability extremes against fixed outputs."""
    assert word.add_parens(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man has a brown dog", 0.0, "The man has a brown dog"),
        ("The man has a brown dog", 1.0, "man The a has dog brown"),
    ],
)
def test_swap_words(string, p, exp):
    """Exercise swap_words at the probability extremes against fixed outputs."""
    assert word.swap_words(string, p) == exp
import pytest
from niacin.text.en import word
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man has a brown dog", 0.0, "The man has a brown dog"),
        ("The man has a brown dog", 1.0, "man has brown dog"),
    ],
)
def test_remove_articles(string, p, exp):
    """Exercise remove_articles at the probability extremes (0.0 and 1.0)."""
    assert word.remove_articles(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("politician persuades dramatic rhythms", 0.0, "politician persuades dramatic rhythms"),
        ("politician persuades dramatic rhythms", 1.0, "politican pursuades dramtic rythyms"),
    ],
)
def test_add_misspellings(string, p, exp):
    """Exercise add_misspelling at the probability extremes against fixed outputs."""
    assert word.add_misspelling(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It has a feud", 0.0, "It ha a feud"),
        ("It has a feud", 1.0, "It ha a vendetta"),
    ],
)
def test_add_hyponyms(string, p, exp):
    """Exercise add_hyponyms at the probability extremes against fixed outputs."""
    assert word.add_hyponyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It has a sore", 0.0, "It ha a sore"),
        ("It has a sore", 1.0, "It ha a infection"),
    ],
)
def test_add_hypernyms(string, p, exp):
    """Exercise add_hypernyms at the probability extremes against fixed outputs."""
    assert word.add_hypernyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("It is computable", 0.0, "It is computable"),
        ("It is computable", 1.0, "It is estimable"),
    ],
)
def test_add_synonyms(string, p, exp):
    """Exercise add_synonyms at the probability extremes against fixed outputs."""
    assert word.add_synonyms(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("dog", 0.0, "dog"),
        ("dog", 1.0, "(((dog)))"),
    ],
)
def test_add_parens(string, p, exp):
    """Exercise add_parens at the probability extremes against fixed outputs."""
    assert word.add_parens(string, p) == exp
@pytest.mark.parametrize(
    "string,p,exp",
    [
        ("", 0.0, ""),
        ("", 1.0, ""),
        ("The man has a brown dog", 0.0, "The man has a brown dog"),
        ("The man has a brown dog", 1.0, "man The a has dog brown"),
    ],
)
def test_swap_words(string, p, exp):
    """Exercise swap_words at the probability extremes against fixed outputs."""
    assert word.swap_words(string, p) == exp
import warnings
'''
Base environment definitions
See docs/api.md for api documentation
See docs/dev_docs.md for additional documentation and an example environment.
'''
class AECEnv:
    '''
    The AECEnv steps agents one at a time. If you are unsure if you
    have implemented a AECEnv correctly, try running the `api_test` documented in
    the Developer documentation on the website.
    '''

    def __init__(self):
        pass

    def step(self, action):
        '''
        Accepts and executes the action of the current agent
        (self.agent_selection). Subclasses must update rewards, dones and
        infos and advance agent_selection.
        '''
        raise NotImplementedError

    def reset(self):
        '''
        resets the environment and returns a dictionary of observations (keyed by the agent name)
        '''
        raise NotImplementedError

    def seed(self, seed=None):
        '''
        Reseeds the environment (making the resulting environment deterministic).
        `reset()` must be called after `seed()`, and before `step()`.
        '''
        pass

    def observe(self, agent):
        '''
        Returns the observation an agent currently can make. `last()` calls this function.
        '''
        raise NotImplementedError

    def render(self, mode='human'):
        '''
        Displays a rendered frame from the environment, if supported.
        Alternate render modes in the default environments are `'rgb_array'`
        which returns a numpy array and is supported by all environments outside of classic,
        and `'ansi'` which returns the strings printed (specific to classic environments).
        '''
        raise NotImplementedError

    def state(self):
        '''
        State returns a global view of the environment appropriate for
        centralized training decentralized execution methods like QMIX
        '''
        raise NotImplementedError('state() method has not been implemented in the environment {}.'.format(self.metadata.get('name', self.__class__.__name__)))

    def close(self):
        '''
        Closes the rendering window, subprocesses, network connections, or any other resources
        that should be released.
        '''
        pass

    def observation_space(self, agent):
        '''
        Takes in agent and returns the observation space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the observation_spaces dict
        '''
        warnings.warn("Your environment should override the observation_space function. Attempting to use the observation_spaces dict attribute.")
        return self.observation_spaces[agent]

    def action_space(self, agent):
        '''
        Takes in agent and returns the action space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the action_spaces dict
        '''
        warnings.warn("Your environment should override the action_space function. Attempting to use the action_spaces dict attribute.")
        return self.action_spaces[agent]

    @property
    def num_agents(self):
        # Number of currently live agents.
        return len(self.agents)

    @property
    def max_num_agents(self):
        # Total number of agents the environment can ever host.
        return len(self.possible_agents)

    def _dones_step_first(self):
        '''
        Makes .agent_selection point to first done agent. Stores old value of agent_selection
        so that _was_done_step can restore the variable after the done agent steps.
        '''
        _dones_order = [agent for agent in self.agents if self.dones[agent]]
        if _dones_order:
            self._skip_agent_selection = self.agent_selection
            self.agent_selection = _dones_order[0]
        return self.agent_selection

    def _clear_rewards(self):
        '''
        clears all items in .rewards
        '''
        for agent in self.rewards:
            self.rewards[agent] = 0

    def _accumulate_rewards(self):
        '''
        adds .rewards dictionary to ._cumulative_rewards dictionary. Typically
        called near the end of a step() method
        '''
        for agent, reward in self.rewards.items():
            self._cumulative_rewards[agent] += reward

    def agent_iter(self, max_iter=2**63):
        '''
        yields the current agent (self.agent_selection) when used in a loop where you step() each iteration.
        '''
        return AECIterable(self, max_iter)

    def last(self, observe=True):
        '''
        returns observation, cumulative reward, done, info for the current agent (specified by self.agent_selection)
        '''
        agent = self.agent_selection
        # Observation can be skipped (observe=False) to avoid unneeded work.
        observation = self.observe(agent) if observe else None
        return observation, self._cumulative_rewards[agent], self.dones[agent], self.infos[agent]

    def _was_done_step(self, action):
        '''
        Helper function that performs step() for done agents.
        Does the following:
        1. Removes done agent from .agents, .dones, .rewards, ._cumulative_rewards, and .infos
        2. Loads next agent into .agent_selection: if another agent is done, loads that one, otherwise load next live agent
        3. Clear the rewards dict
        Highly recommended to use at the beginning of step as follows:
        def step(self, action):
            if self.dones[self.agent_selection]:
                self._was_done_step(action)
                return
            # main contents of step
        '''
        if action is not None:
            raise ValueError("when an agent is done, the only valid action is None")
        # removes done agent
        agent = self.agent_selection
        assert self.dones[agent], "an agent that was not done as attempted to be removed"
        del self.dones[agent]
        del self.rewards[agent]
        del self._cumulative_rewards[agent]
        del self.infos[agent]
        self.agents.remove(agent)
        # finds next done agent or loads next live agent (Stored in _skip_agent_selection)
        _dones_order = [agent for agent in self.agents if self.dones[agent]]
        if _dones_order:
            # Remember where to resume once all done agents have stepped.
            if getattr(self, '_skip_agent_selection', None) is None:
                self._skip_agent_selection = self.agent_selection
            self.agent_selection = _dones_order[0]
        else:
            # Restore the live agent saved before visiting the done agents.
            if getattr(self, '_skip_agent_selection', None) is not None:
                self.agent_selection = self._skip_agent_selection
            self._skip_agent_selection = None
        self._clear_rewards()

    def __str__(self):
        '''
        returns a name which looks like: "space_invaders_v1"
        '''
        if hasattr(self, 'metadata'):
            return self.metadata.get('name', self.__class__.__name__)
        else:
            return self.__class__.__name__

    @property
    def unwrapped(self):
        # Base environments are their own unwrapped form; wrappers override this.
        return self
class AECIterable:
    """Iterable pairing an AEC environment with an iteration budget."""

    def __init__(self, env, max_iter):
        # Store both so each __iter__ call can spawn a fresh iterator.
        self.env, self.max_iter = env, max_iter

    def __iter__(self):
        """Return a new iterator over the environment's agent selections."""
        return AECIterator(self.env, self.max_iter)
class AECIterator:
    """Yields env.agent_selection until no agents remain or the budget is spent."""

    def __init__(self, env, max_iter):
        self.env = env
        # Remaining number of selections this iterator may still yield.
        self.iters_til_term = max_iter

    def __next__(self):
        if self.iters_til_term <= 0 or not self.env.agents:
            raise StopIteration
        self.iters_til_term -= 1
        return self.env.agent_selection
class ParallelEnv:
    '''
    The Parallel environment steps every live agent at once. If you are unsure if you
    have implemented a ParallelEnv correctly, try running the `parallel_api_test` in
    the Developer documentation on the website.
    '''

    def reset(self):
        '''
        resets the environment and returns a dictionary of observations (keyed by the agent name)
        '''
        raise NotImplementedError

    def seed(self, seed=None):
        '''
        Reseeds the environment (making it deterministic).
        `reset()` must be called after `seed()`, and before `step()`.
        '''
        pass

    def step(self, actions):
        '''
        receives a dictionary of actions keyed by the agent name.
        Returns the observation dictionary, reward dictionary, done dictionary,
        and info dictionary, where each dictionary is keyed by the agent.
        '''
        raise NotImplementedError

    def render(self, mode="human"):
        '''
        Displays a rendered frame from the environment, if supported.
        Alternate render modes in the default environments are `'rgb_array'`
        which returns a numpy array and is supported by all environments outside
        of classic, and `'ansi'` which returns the strings printed
        (specific to classic environments).
        '''
        raise NotImplementedError

    def close(self):
        '''
        Closes the rendering window.
        '''
        pass

    def state(self):
        '''
        State returns a global view of the environment appropriate for
        centralized training decentralized execution methods like QMIX
        '''
        raise NotImplementedError('state() method has not been implemented in the environment {}.'.format(self.metadata.get('name', self.__class__.__name__)))

    def observation_space(self, agent):
        '''
        Takes in agent and returns the observation space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the observation_spaces dict
        '''
        # BUGFIX: the warning text previously read "Yfour environment".
        warnings.warn("Your environment should override the observation_space function. Attempting to use the observation_spaces dict attribute.")
        return self.observation_spaces[agent]

    def action_space(self, agent):
        '''
        Takes in agent and returns the action space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the action_spaces dict
        '''
        warnings.warn("Your environment should override the action_space function. Attempting to use the action_spaces dict attribute.")
        return self.action_spaces[agent]

    @property
    def num_agents(self):
        # Number of currently live agents.
        return len(self.agents)

    @property
    def max_num_agents(self):
        # Total number of agents the environment can ever host.
        return len(self.possible_agents)

    def __str__(self):
        '''
        returns a name which looks like: "space_invaders_v1" by default
        '''
        if hasattr(self, 'metadata'):
            return self.metadata.get('name', self.__class__.__name__)
        else:
            return self.__class__.__name__
@property
def unwrapped(self):
return self | pettingzoo/utils/env.py | import warnings
'''
Base environment definitions
See docs/api.md for api documentation
See docs/dev_docs.md for additional documentation and an example environment.
'''
class AECEnv:
    '''
    The AECEnv steps agents one at a time. If you are unsure if you
    have implemented a AECEnv correctly, try running the `api_test` documented in
    the Developer documentation on the website.
    '''

    def __init__(self):
        pass

    def step(self, action):
        '''
        Accepts and executes the action of the current agent
        (self.agent_selection). Subclasses must update rewards, dones and
        infos and advance agent_selection.
        '''
        raise NotImplementedError

    def reset(self):
        '''
        resets the environment and returns a dictionary of observations (keyed by the agent name)
        '''
        raise NotImplementedError

    def seed(self, seed=None):
        '''
        Reseeds the environment (making the resulting environment deterministic).
        `reset()` must be called after `seed()`, and before `step()`.
        '''
        pass

    def observe(self, agent):
        '''
        Returns the observation an agent currently can make. `last()` calls this function.
        '''
        raise NotImplementedError

    def render(self, mode='human'):
        '''
        Displays a rendered frame from the environment, if supported.
        Alternate render modes in the default environments are `'rgb_array'`
        which returns a numpy array and is supported by all environments outside of classic,
        and `'ansi'` which returns the strings printed (specific to classic environments).
        '''
        raise NotImplementedError

    def state(self):
        '''
        State returns a global view of the environment appropriate for
        centralized training decentralized execution methods like QMIX
        '''
        raise NotImplementedError('state() method has not been implemented in the environment {}.'.format(self.metadata.get('name', self.__class__.__name__)))

    def close(self):
        '''
        Closes the rendering window, subprocesses, network connections, or any other resources
        that should be released.
        '''
        pass

    def observation_space(self, agent):
        '''
        Takes in agent and returns the observation space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the observation_spaces dict
        '''
        warnings.warn("Your environment should override the observation_space function. Attempting to use the observation_spaces dict attribute.")
        return self.observation_spaces[agent]

    def action_space(self, agent):
        '''
        Takes in agent and returns the action space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the action_spaces dict
        '''
        warnings.warn("Your environment should override the action_space function. Attempting to use the action_spaces dict attribute.")
        return self.action_spaces[agent]

    @property
    def num_agents(self):
        # Number of currently live agents.
        return len(self.agents)

    @property
    def max_num_agents(self):
        # Total number of agents the environment can ever host.
        return len(self.possible_agents)

    def _dones_step_first(self):
        '''
        Makes .agent_selection point to first done agent. Stores old value of agent_selection
        so that _was_done_step can restore the variable after the done agent steps.
        '''
        _dones_order = [agent for agent in self.agents if self.dones[agent]]
        if _dones_order:
            self._skip_agent_selection = self.agent_selection
            self.agent_selection = _dones_order[0]
        return self.agent_selection

    def _clear_rewards(self):
        '''
        clears all items in .rewards
        '''
        for agent in self.rewards:
            self.rewards[agent] = 0

    def _accumulate_rewards(self):
        '''
        adds .rewards dictionary to ._cumulative_rewards dictionary. Typically
        called near the end of a step() method
        '''
        for agent, reward in self.rewards.items():
            self._cumulative_rewards[agent] += reward

    def agent_iter(self, max_iter=2**63):
        '''
        yields the current agent (self.agent_selection) when used in a loop where you step() each iteration.
        '''
        return AECIterable(self, max_iter)

    def last(self, observe=True):
        '''
        returns observation, cumulative reward, done, info for the current agent (specified by self.agent_selection)
        '''
        agent = self.agent_selection
        # Observation can be skipped (observe=False) to avoid unneeded work.
        observation = self.observe(agent) if observe else None
        return observation, self._cumulative_rewards[agent], self.dones[agent], self.infos[agent]

    def _was_done_step(self, action):
        '''
        Helper function that performs step() for done agents.
        Does the following:
        1. Removes done agent from .agents, .dones, .rewards, ._cumulative_rewards, and .infos
        2. Loads next agent into .agent_selection: if another agent is done, loads that one, otherwise load next live agent
        3. Clear the rewards dict
        Highly recommended to use at the beginning of step as follows:
        def step(self, action):
            if self.dones[self.agent_selection]:
                self._was_done_step(action)
                return
            # main contents of step
        '''
        if action is not None:
            raise ValueError("when an agent is done, the only valid action is None")
        # removes done agent
        agent = self.agent_selection
        assert self.dones[agent], "an agent that was not done as attempted to be removed"
        del self.dones[agent]
        del self.rewards[agent]
        del self._cumulative_rewards[agent]
        del self.infos[agent]
        self.agents.remove(agent)
        # finds next done agent or loads next live agent (Stored in _skip_agent_selection)
        _dones_order = [agent for agent in self.agents if self.dones[agent]]
        if _dones_order:
            # Remember where to resume once all done agents have stepped.
            if getattr(self, '_skip_agent_selection', None) is None:
                self._skip_agent_selection = self.agent_selection
            self.agent_selection = _dones_order[0]
        else:
            # Restore the live agent saved before visiting the done agents.
            if getattr(self, '_skip_agent_selection', None) is not None:
                self.agent_selection = self._skip_agent_selection
            self._skip_agent_selection = None
        self._clear_rewards()

    def __str__(self):
        '''
        returns a name which looks like: "space_invaders_v1"
        '''
        if hasattr(self, 'metadata'):
            return self.metadata.get('name', self.__class__.__name__)
        else:
            return self.__class__.__name__

    @property
    def unwrapped(self):
        # Base environments are their own unwrapped form; wrappers override this.
        return self
class AECIterable:
    """Iterable pairing an AEC environment with an iteration budget."""

    def __init__(self, env, max_iter):
        # Store both so each __iter__ call can spawn a fresh iterator.
        self.env, self.max_iter = env, max_iter

    def __iter__(self):
        """Return a new iterator over the environment's agent selections."""
        return AECIterator(self.env, self.max_iter)
class AECIterator:
    """Yields env.agent_selection until no agents remain or the budget is spent."""

    def __init__(self, env, max_iter):
        self.env = env
        # Remaining number of selections this iterator may still yield.
        self.iters_til_term = max_iter

    def __next__(self):
        if self.iters_til_term <= 0 or not self.env.agents:
            raise StopIteration
        self.iters_til_term -= 1
        return self.env.agent_selection
class ParallelEnv:
    '''
    The Parallel environment steps every live agent at once. If you are unsure if you
    have implemented a ParallelEnv correctly, try running the `parallel_api_test` in
    the Developer documentation on the website.
    '''

    def reset(self):
        '''
        resets the environment and returns a dictionary of observations (keyed by the agent name)
        '''
        raise NotImplementedError

    def seed(self, seed=None):
        '''
        Reseeds the environment (making it deterministic).
        `reset()` must be called after `seed()`, and before `step()`.
        '''
        pass

    def step(self, actions):
        '''
        receives a dictionary of actions keyed by the agent name.
        Returns the observation dictionary, reward dictionary, done dictionary,
        and info dictionary, where each dictionary is keyed by the agent.
        '''
        raise NotImplementedError

    def render(self, mode="human"):
        '''
        Displays a rendered frame from the environment, if supported.
        Alternate render modes in the default environments are `'rgb_array'`
        which returns a numpy array and is supported by all environments outside
        of classic, and `'ansi'` which returns the strings printed
        (specific to classic environments).
        '''
        raise NotImplementedError

    def close(self):
        '''
        Closes the rendering window.
        '''
        pass

    def state(self):
        '''
        State returns a global view of the environment appropriate for
        centralized training decentralized execution methods like QMIX
        '''
        raise NotImplementedError('state() method has not been implemented in the environment {}.'.format(self.metadata.get('name', self.__class__.__name__)))

    def observation_space(self, agent):
        '''
        Takes in agent and returns the observation space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the observation_spaces dict
        '''
        # BUGFIX: the warning text previously read "Yfour environment".
        warnings.warn("Your environment should override the observation_space function. Attempting to use the observation_spaces dict attribute.")
        return self.observation_spaces[agent]

    def action_space(self, agent):
        '''
        Takes in agent and returns the action space for that agent.
        MUST return the same value for the same agent name
        Default implementation is to return the action_spaces dict
        '''
        warnings.warn("Your environment should override the action_space function. Attempting to use the action_spaces dict attribute.")
        return self.action_spaces[agent]

    @property
    def num_agents(self):
        # Number of currently live agents.
        return len(self.agents)

    @property
    def max_num_agents(self):
        # Total number of agents the environment can ever host.
        return len(self.possible_agents)

    def __str__(self):
        '''
        returns a name which looks like: "space_invaders_v1" by default
        '''
        if hasattr(self, 'metadata'):
            return self.metadata.get('name', self.__class__.__name__)
        else:
            return self.__class__.__name__
@property
def unwrapped(self):
return self | 0.8308 | 0.51879 |
import datetime
import json
import pytest
from app import app
from app.db_connection import get_db_connection
@pytest.fixture()
def clean_up():
    """Teardown fixture: delete the documents that creation tests may leave behind."""
    print("setup")
    yield
    print("deleting")
    # IDs written by the estropada-creation tests.
    docs = [
        '2021-06-01_ACT_Estropada-test',
        '2021-06-01_ARC1_Estropada-test2',
        '2021-06-02_ARC1_Estropada-test4',
        '2021-06-01_ACT_Kaiku'
    ]
    with get_db_connection() as database:
        for doc_id in docs:
            try:
                doc = database[doc_id]
                # Cloudant-style Document API assumed (exists/fetch/delete) — TODO confirm.
                if doc.exists():
                    doc.fetch()
                    doc.delete()
            except KeyError:
                # Document was never created — nothing to clean up.
                pass
@pytest.fixture()
def two_day_competition():
    """Create two linked race-day documents for a two-day competition,
    then delete them again on teardown.

    BUGFIX: the teardown previously printed "Deleted ..." before (and
    regardless of whether) the document was actually removed, and it
    rebound the loop variable `doc`; it now reports only after a
    successful delete and uses a separate name for the stored document.
    """
    print("setup")
    docs = [{
        "_id": "2021-06-01_ACT_J1",
        "data": "2021-06-01T18:00:00",
        "izena": "J1",
        "liga": "ACT",
        "urla": "http://foo.com",
        "bi_jardunaldiko_bandera": True,
        "jardunaldia": 1,
        "related_estropada": "2021-06-02_ACT_J2"
    }, {
        "_id": "2021-06-02_ACT_J2",
        "data": "2021-06-02T18:00:00",
        "izena": "J2",
        "liga": "ACT",
        "urla": "http://foo.com",
        "bi_jardunaldiko_bandera": True,
        "jardunaldia": 2,
        "related_estropada": "2021-06-01_ACT_J1"
    }]
    with get_db_connection() as database:
        for doc in docs:
            try:
                docum = database.create_document(doc)
                print(f"Created {docum['_id']}")
            except KeyError:
                pass
        # Hand control to the test while the connection is still open so the
        # teardown below can reuse it.
        yield
        for doc in docs:
            try:
                stored = database[doc['_id']]
                if stored.exists():
                    stored.fetch()
                    stored.delete()
                    print(f"Deleted {stored['_id']}")
            except KeyError:
                # Document was never created — nothing to delete.
                pass
class TestEstropadak():
    """API tests for the /estropadak endpoints, exercised via the Flask test client."""

    @pytest.fixture()
    def estropadakApp(self):
        # Flask test client used by every test in this class.
        return app.test_client()

    def tearDown(self):
        # Kept for unittest-style symmetry; pytest does not call this.
        pass

    def testActiveYear(self, estropadakApp):
        rv = estropadakApp.get('/active_year')
        year = json.loads(rv.data.decode('utf-8'))
        n = datetime.datetime.now()
        y = n.year
        # The "active" season is either the current or the previous year.
        assert year == y or year == y-1

    def testEstropadakList(self, estropadakApp):
        # A full 2010 ACT season: 20 races, spot-check the first one's fields.
        rv = estropadakApp.get('/estropadak?league=act&year=2010', )
        estropadak = json.loads(rv.data.decode('utf-8'))
        assert len(estropadak) == 20
        assert estropadak[0]['id'] == "2010-07-03_ACT_I-Bandera-SEAT---G.P.-Villa-de-Bilbao"
        assert estropadak[0]["izena"] == "I Bandera SEAT - G.P. Villa de Bilbao"
        assert estropadak[0]["data"] == "2010-07-03T17:00:00"
        assert estropadak[0]["liga"] == "ACT"
        assert estropadak[0]["urla"] == "http://www.euskolabelliga.com/resultados/ver.php?id=eu&r=1269258408"
        assert estropadak[0]["lekua"] == "Bilbao Bizkaia"
        assert estropadak[0]["kategoriak"] == []

    def testEstropadakListWithoutResults(self, estropadakApp):
        # A season with no stored races is rejected as a bad request.
        rv = estropadakApp.get('/estropadak?league=act&year=1900')
        assert rv.status_code == 400

    def testEstropadakListWithWrongLeague(self, estropadakApp):
        # An unknown league name is rejected as a bad request.
        rv = estropadakApp.get('/estropadak?league=actt&year=2010')
        assert rv.status_code == 400

    def testEstropadakWithoutParams(self, estropadakApp):
        rv = estropadakApp.get('/estropadak')
        assert rv.status_code == 200

    def testEstropadakWithBadPaginationParams(self, estropadakApp):
        # Non-numeric pagination values must be rejected.
        rv = estropadakApp.get('/estropadak?page=r')
        assert rv.status_code == 400
        rv = estropadakApp.get('/estropadak?count=r')
        assert rv.status_code == 400

    def testEstropadakWithDefaultPaginationParams(self, estropadakApp):
        rv = estropadakApp.get('/estropadak')
        assert rv.status_code == 200
        print(rv.get_json())
        # Default page size appears to be 50 — TODO confirm against the API.
        assert len(rv.get_json()) == 50

    def testEstropada(self, estropadakApp):
        # Fetch a single race by its document id.
        rv = estropadakApp.get('/estropadak/1c79d46b8c74ad399d54fd7ee40005e3')
        estropada = json.loads(rv.data.decode('utf-8'))
        assert estropada['izena'] == 'III Bandera Euskadi Basque Country'

    def testEstropadaNotFound(self, estropadakApp):
        rv = estropadakApp.get('/estropadak/fuck')
        # estropada = json.loads(rv.data.decode('utf-8'))
        assert rv.status_code == 404

    def testEstropadaCreationWithCredentials(self, estropadakApp, credentials):
        # NOTE(review): the `credentials` fixture is defined outside this file
        # (presumably conftest.py); this test creates a document but does not
        # request the clean_up fixture — confirm teardown happens elsewhere.
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": [],
            "lekua": "Nonbait"
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 201

    def testEstropadaCreationWithoutCredentials(self, estropadakApp, credentials, clean_up):
        # Unauthenticated creation must be rejected with 401.
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": []
        })
        assert rv.status_code == 401
def testEstropadaModificationWithoutCredentials(self, estropadakApp, credentials):
rv = estropadakApp.put('/estropadak/2021_act_estropada', json={
"izena": "Estropada test",
"data": "2021-06-01 17:00",
"liga": "ACT",
"sailkapena": [],
})
assert rv.status_code == 401
def testEstropadaModificationWithCredentials(self, estropadakApp, credentials, clean_up):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test2",
"data": "2021-06-01 17:00",
"liga": "ARC1",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
rv = estropadakApp.put('/estropadak/2021-06-01_ARC1_Estropada-test2', json={
"izena": "Estropada test2",
"data": "2021-06-01 17:30",
"liga": "ARC1",
"sailkapena": [],
"lekua": "Nonbait"
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 200
rv = estropadakApp.get('/estropadak/2021-06-01_ARC1_Estropada-test2')
recovered_doc = rv.get_json()
recovered_doc['izena'] == "Estropada test2"
recovered_doc['data'] == "2021-06-01 17:30"
recovered_doc['liga'] == "arc1"
recovered_doc['lekua'] == 'Nonbait'
recovered_doc['sailkapena'] == []
def testEstropadaDeletionWithoutCredentials(self, estropadakApp, credentials, clean_up):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test4",
"data": "2021-06-02 17:00",
"liga": "ARC1",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 201
rv = estropadakApp.delete('/estropadak/2021-06-02_ARC1_Estropada-test4')
assert rv.status_code == 401
def testEstropadaDeletionWithCredentials(self, estropadakApp, credentials):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test3",
"data": "2021-06-02 17:00",
"liga": "ARC1",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 201
rv = estropadakApp.delete('/estropadak/2021-06-02_ARC1_Estropada-test3', headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 200
def testEstropadaCreationWithMissingDataInModel(self, estropadakApp, credentials):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test5",
"data": "2021-06-10 17:00",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 400
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test5",
"liga": "ARC1",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 400
rv = estropadakApp.post('/estropadak', json={
"izena": "",
"data": "2021-06-10 17:00",
"liga": "ARC1",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 400
def testEstropadaCreationWithUnsupportedLiga(self, estropadakApp, credentials):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test5",
"liga": "ACTT",
"data": "2021-06-10 17:00",
"sailkapena": []
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 400
def testEstropadaCreationWithSailkapena(self, estropadakApp, credentials):
rv = estropadakApp.post('/auth', json=credentials)
token = rv.json['access_token']
rv = estropadakApp.post('/estropadak', json={
"izena": "Estropada test",
"data": "2021-06-01 17:00",
"liga": "ACT",
"sailkapena": [{
"talde_izena": "KAIKU",
"denbora": "20:14,84",
"puntuazioa": 5,
"posizioa": 8,
"tanda": 1,
"tanda_postua": 1,
"kalea": 1,
"ziabogak": [
"05:06",
"09:56",
"15:24"
]
}]
}, headers=[('Authorization', f'JWT {token}')])
assert rv.status_code == 201
rv = estropadakApp.get('/estropadak/2021-06-01_ACT_Estropada-test')
assert rv.status_code == 200
rv = estropadakApp.get('/emaitzak/2021-06-01_ACT_Kaiku')
assert rv.status_code == 200
def test_estropada_with_two_day_sailkapena(self, estropadakApp):
rv = estropadakApp.get('/estropadak/2021-07-03_ACT_V-Bandeira-cidade-da-Coruรฑa-(J1)')
estropada = json.loads(rv.data.decode('utf-8'))
assert len(estropada['bi_eguneko_sailkapena']) == 12
for sailk in estropada['bi_eguneko_sailkapena']:
if sailk['talde_izena'] == 'GO FIT HONDARRIBIA':
assert sailk['denbora_batura'] == '41:22,44'
def test_estropada_with_two_day_sailkapena_still_unplayed(self, estropadakApp, two_day_competition):
rv = estropadakApp.get('/estropadak/2021-06-01_ACT_J1')
estropada = json.loads(rv.data.decode('utf-8'))
assert len(estropada['bi_eguneko_sailkapena']) == 0
assert estropada['bi_eguneko_sailkapena'] == []
assert estropada['related_estropada'] == '2021-06-02_ACT_J2'
assert estropada['jardunaldia'] == 1 | test/test_estropadak.py | import datetime
import json
import pytest
from app import app
from app.db_connection import get_db_connection
@pytest.fixture()
def clean_up():
    """Yield to the test, then purge any documents the test may have created."""
    print("setup")
    yield
    print("deleting")
    leftover_ids = [
        '2021-06-01_ACT_Estropada-test',
        '2021-06-01_ARC1_Estropada-test2',
        '2021-06-02_ARC1_Estropada-test4',
        '2021-06-01_ACT_Kaiku',
    ]
    with get_db_connection() as database:
        for leftover_id in leftover_ids:
            try:
                document = database[leftover_id]
                if document.exists():
                    document.fetch()
                    document.delete()
            except KeyError:
                pass
@pytest.fixture()
def two_day_competition():
    """Create a linked pair of two-day regatta documents, then remove them.

    The two docs reference each other via 'related_estropada'.
    """
    print("setup")
    docs = [{
        "_id": "2021-06-01_ACT_J1",
        "data": "2021-06-01T18:00:00",
        "izena": "J1",
        "liga": "ACT",
        "urla": "http://foo.com",
        "bi_jardunaldiko_bandera": True,
        "jardunaldia": 1,
        "related_estropada": "2021-06-02_ACT_J2"
    }, {
        "_id": "2021-06-02_ACT_J2",
        "data": "2021-06-02T18:00:00",
        "izena": "J2",
        "liga": "ACT",
        "urla": "http://foo.com",
        "bi_jardunaldiko_bandera": True,
        "jardunaldia": 2,
        "related_estropada": "2021-06-01_ACT_J1"
    }]
    with get_db_connection() as database:
        for doc in docs:
            try:
                docum = database.create_document(doc)
                print(f"Created {docum['_id']}")
            except KeyError:
                pass
        yield
        # NOTE(review): 'doc' is rebound below to the fetched database
        # document; this works because both carry '_id', but a distinct
        # name would be clearer.
        for doc in docs:
            try:
                doc = database[doc['_id']]
                # NOTE(review): "Deleted" is printed before the existence
                # check, so it can report documents that were never removed.
                print(f"Deleted {doc['_id']}")
                if doc.exists():
                    doc.fetch()
                    doc.delete()
            except KeyError:
                pass
class TestEstropadak():
    """Integration tests for the /estropadak REST endpoints.

    Covers listing/pagination, single-document retrieval, and the
    authenticated create/update/delete flows.
    """

    @pytest.fixture()
    def estropadakApp(self):
        # A fresh Flask test client for every test.
        return app.test_client()

    # NOTE: the original class carried a no-op `tearDown`; pytest does not
    # call xunit-style `tearDown` on plain (non-unittest) classes, so the
    # dead method has been removed.

    def testActiveYear(self, estropadakApp):
        rv = estropadakApp.get('/active_year')
        year = json.loads(rv.data.decode('utf-8'))
        n = datetime.datetime.now()
        y = n.year
        # Early in the season the API may still report the previous year.
        assert year == y or year == y - 1

    def testEstropadakList(self, estropadakApp):
        rv = estropadakApp.get('/estropadak?league=act&year=2010')
        estropadak = json.loads(rv.data.decode('utf-8'))
        assert len(estropadak) == 20
        assert estropadak[0]['id'] == "2010-07-03_ACT_I-Bandera-SEAT---G.P.-Villa-de-Bilbao"
        assert estropadak[0]["izena"] == "I Bandera SEAT - G.P. Villa de Bilbao"
        assert estropadak[0]["data"] == "2010-07-03T17:00:00"
        assert estropadak[0]["liga"] == "ACT"
        assert estropadak[0]["urla"] == "http://www.euskolabelliga.com/resultados/ver.php?id=eu&r=1269258408"
        assert estropadak[0]["lekua"] == "Bilbao Bizkaia"
        assert estropadak[0]["kategoriak"] == []

    def testEstropadakListWithoutResults(self, estropadakApp):
        rv = estropadakApp.get('/estropadak?league=act&year=1900')
        assert rv.status_code == 400

    def testEstropadakListWithWrongLeague(self, estropadakApp):
        rv = estropadakApp.get('/estropadak?league=actt&year=2010')
        assert rv.status_code == 400

    def testEstropadakWithoutParams(self, estropadakApp):
        rv = estropadakApp.get('/estropadak')
        assert rv.status_code == 200

    def testEstropadakWithBadPaginationParams(self, estropadakApp):
        rv = estropadakApp.get('/estropadak?page=r')
        assert rv.status_code == 400
        rv = estropadakApp.get('/estropadak?count=r')
        assert rv.status_code == 400

    def testEstropadakWithDefaultPaginationParams(self, estropadakApp):
        rv = estropadakApp.get('/estropadak')
        assert rv.status_code == 200
        print(rv.get_json())
        # Default page size is 50.
        assert len(rv.get_json()) == 50

    def testEstropada(self, estropadakApp):
        rv = estropadakApp.get('/estropadak/1c79d46b8c74ad399d54fd7ee40005e3')
        estropada = json.loads(rv.data.decode('utf-8'))
        assert estropada['izena'] == 'III Bandera Euskadi Basque Country'

    def testEstropadaNotFound(self, estropadakApp):
        rv = estropadakApp.get('/estropadak/fuck')
        assert rv.status_code == 404

    def testEstropadaCreationWithCredentials(self, estropadakApp, credentials, clean_up):
        # Fix: request clean_up so the created document does not leak
        # between runs (its id is in the clean_up deletion list).
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": [],
            "lekua": "Nonbait"
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 201

    def testEstropadaCreationWithoutCredentials(self, estropadakApp, credentials, clean_up):
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": []
        })
        assert rv.status_code == 401

    def testEstropadaModificationWithoutCredentials(self, estropadakApp, credentials):
        rv = estropadakApp.put('/estropadak/2021_act_estropada', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": [],
        })
        assert rv.status_code == 401

    def testEstropadaModificationWithCredentials(self, estropadakApp, credentials, clean_up):
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test2",
            "data": "2021-06-01 17:00",
            "liga": "ARC1",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        rv = estropadakApp.put('/estropadak/2021-06-01_ARC1_Estropada-test2', json={
            "izena": "Estropada test2",
            "data": "2021-06-01 17:30",
            "liga": "ARC1",
            "sailkapena": [],
            "lekua": "Nonbait"
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 200
        rv = estropadakApp.get('/estropadak/2021-06-01_ARC1_Estropada-test2')
        recovered_doc = rv.get_json()
        # Fix: these five comparisons were bare expressions and never
        # checked anything; they are real assertions now.
        assert recovered_doc['izena'] == "Estropada test2"
        # NOTE(review): the never-executed original expected "2021-06-01 17:30";
        # accept the ISO-normalized form too until confirmed against the API.
        assert recovered_doc['data'] in ("2021-06-01 17:30", "2021-06-01T17:30:00")
        # NOTE(review): the never-executed original compared against the
        # lowercase "arc1"; compare case-insensitively until confirmed.
        assert recovered_doc['liga'].lower() == "arc1"
        assert recovered_doc['lekua'] == 'Nonbait'
        assert recovered_doc['sailkapena'] == []

    def testEstropadaDeletionWithoutCredentials(self, estropadakApp, credentials, clean_up):
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test4",
            "data": "2021-06-02 17:00",
            "liga": "ARC1",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 201
        # Unauthenticated delete must be rejected.
        rv = estropadakApp.delete('/estropadak/2021-06-02_ARC1_Estropada-test4')
        assert rv.status_code == 401

    def testEstropadaDeletionWithCredentials(self, estropadakApp, credentials):
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test3",
            "data": "2021-06-02 17:00",
            "liga": "ARC1",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 201
        # This test deletes its own document, so no clean_up is needed.
        rv = estropadakApp.delete('/estropadak/2021-06-02_ARC1_Estropada-test3', headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 200

    def testEstropadaCreationWithMissingDataInModel(self, estropadakApp, credentials):
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        # Missing 'liga'.
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test5",
            "data": "2021-06-10 17:00",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 400
        # Missing 'data'.
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test5",
            "liga": "ARC1",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 400
        # Empty 'izena'.
        rv = estropadakApp.post('/estropadak', json={
            "izena": "",
            "data": "2021-06-10 17:00",
            "liga": "ARC1",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 400

    def testEstropadaCreationWithUnsupportedLiga(self, estropadakApp, credentials):
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test5",
            "liga": "ACTT",
            "data": "2021-06-10 17:00",
            "sailkapena": []
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 400

    def testEstropadaCreationWithSailkapena(self, estropadakApp, credentials, clean_up):
        # Fix: request clean_up -- this test creates both the estropada and
        # a per-team result document, and both ids are in clean_up's list.
        rv = estropadakApp.post('/auth', json=credentials)
        token = rv.json['access_token']
        rv = estropadakApp.post('/estropadak', json={
            "izena": "Estropada test",
            "data": "2021-06-01 17:00",
            "liga": "ACT",
            "sailkapena": [{
                "talde_izena": "KAIKU",
                "denbora": "20:14,84",
                "puntuazioa": 5,
                "posizioa": 8,
                "tanda": 1,
                "tanda_postua": 1,
                "kalea": 1,
                "ziabogak": [
                    "05:06",
                    "09:56",
                    "15:24"
                ]
            }]
        }, headers=[('Authorization', f'JWT {token}')])
        assert rv.status_code == 201
        rv = estropadakApp.get('/estropadak/2021-06-01_ACT_Estropada-test')
        assert rv.status_code == 200
        rv = estropadakApp.get('/emaitzak/2021-06-01_ACT_Kaiku')
        assert rv.status_code == 200

    def test_estropada_with_two_day_sailkapena(self, estropadakApp):
        rv = estropadakApp.get('/estropadak/2021-07-03_ACT_V-Bandeira-cidade-da-Coruรฑa-(J1)')
        estropada = json.loads(rv.data.decode('utf-8'))
        assert len(estropada['bi_eguneko_sailkapena']) == 12
        for sailk in estropada['bi_eguneko_sailkapena']:
            if sailk['talde_izena'] == 'GO FIT HONDARRIBIA':
                assert sailk['denbora_batura'] == '41:22,44'

    def test_estropada_with_two_day_sailkapena_still_unplayed(self, estropadakApp, two_day_competition):
        rv = estropadakApp.get('/estropadak/2021-06-01_ACT_J1')
        estropada = json.loads(rv.data.decode('utf-8'))
        # Second day not played yet: aggregate classification must be empty.
        assert len(estropada['bi_eguneko_sailkapena']) == 0
        assert estropada['bi_eguneko_sailkapena'] == []
        assert estropada['related_estropada'] == '2021-06-02_ACT_J2'
        assert estropada['jardunaldia'] == 1
import os
import unittest
from test import support

# Skip the whole test module when the platform has no spwd module
# (support.import_module raises unittest.SkipTest in that case).
spwd = support.import_module('spwd')
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
                     'root privileges required')
class TestSpwdRoot(unittest.TestCase):
    """Shadow password database tests; reading spwd requires root."""

    def test_getspall(self):
        entries = spwd.getspall()
        self.assertIsInstance(entries, list)
        for entry in entries:
            self.assertIsInstance(entry, spwd.struct_spwd)

    def test_getspnam(self):
        entries = spwd.getspall()
        if not entries:
            self.skipTest('empty shadow password database')
        # Probe with a name known to exist (the first entry's).
        random_name = entries[0].sp_namp
        entry = spwd.getspnam(random_name)
        self.assertIsInstance(entry, spwd.struct_spwd)
        # Name: attribute, positional access and the sp_nam alias agree.
        self.assertEqual(entry.sp_namp, random_name)
        self.assertEqual(entry.sp_namp, entry[0])
        self.assertEqual(entry.sp_namp, entry.sp_nam)
        # Encrypted password: str, mirrored at index 1 and the sp_pwd alias.
        self.assertIsInstance(entry.sp_pwdp, str)
        self.assertEqual(entry.sp_pwdp, entry[1])
        self.assertEqual(entry.sp_pwdp, entry.sp_pwd)
        # The remaining fields are ints mirrored at positions 2..8.
        self.assertIsInstance(entry.sp_lstchg, int)
        self.assertEqual(entry.sp_lstchg, entry[2])
        self.assertIsInstance(entry.sp_min, int)
        self.assertEqual(entry.sp_min, entry[3])
        self.assertIsInstance(entry.sp_max, int)
        self.assertEqual(entry.sp_max, entry[4])
        self.assertIsInstance(entry.sp_warn, int)
        self.assertEqual(entry.sp_warn, entry[5])
        self.assertIsInstance(entry.sp_inact, int)
        self.assertEqual(entry.sp_inact, entry[6])
        self.assertIsInstance(entry.sp_expire, int)
        self.assertEqual(entry.sp_expire, entry[7])
        self.assertIsInstance(entry.sp_flag, int)
        self.assertEqual(entry.sp_flag, entry[8])
        # Lookup failures and bad argument types.
        with self.assertRaises(KeyError) as cx:
            spwd.getspnam('invalid user name')
        self.assertEqual(str(cx.exception), "'getspnam(): name not found'")
        self.assertRaises(TypeError, spwd.getspnam)
        self.assertRaises(TypeError, spwd.getspnam, 0)
        self.assertRaises(TypeError, spwd.getspnam, random_name, 0)
        # bytes names are rejected (when the name is encodable at all).
        try:
            bytes_name = os.fsencode(random_name)
        except UnicodeEncodeError:
            pass
        else:
            self.assertRaises(TypeError, spwd.getspnam, bytes_name)
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() != 0,
                     'non-root user required')
class TestSpwdNonRoot(unittest.TestCase):
    """spwd behaviour for unprivileged callers."""

    def test_getspnam_exception(self):
        # An unprivileged caller must get PermissionError, not the entry.
        name = 'bin'
        try:
            # Fix: dropped the unused `as cm` binding.
            with self.assertRaises(PermissionError):
                spwd.getspnam(name)
        except KeyError as exc:
            # Entry may simply be absent on this system; not a failure.
            self.skipTest("spwd entry %r doesn't exist: %s" % (name, exc))
# Fix: stripped dataset-extraction junk that had been fused onto this line.
if __name__ == "__main__":
    unittest.main()
import unittest
from test import support

# Skip the whole test module when the platform has no spwd module
# (support.import_module raises unittest.SkipTest in that case).
spwd = support.import_module('spwd')
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() == 0,
                     'root privileges required')
class TestSpwdRoot(unittest.TestCase):
    """Shadow password database tests; only meaningful when run as root."""

    def test_getspall(self):
        all_entries = spwd.getspall()
        self.assertIsInstance(all_entries, list)
        for record in all_entries:
            self.assertIsInstance(record, spwd.struct_spwd)

    def test_getspnam(self):
        all_entries = spwd.getspall()
        if not all_entries:
            self.skipTest('empty shadow password database')
        probe_name = all_entries[0].sp_namp
        record = spwd.getspnam(probe_name)
        self.assertIsInstance(record, spwd.struct_spwd)
        # The name is exposed as an attribute, positionally, and via the
        # sp_nam alias; all three must agree.
        self.assertEqual(record.sp_namp, probe_name)
        self.assertEqual(record.sp_namp, record[0])
        self.assertEqual(record.sp_namp, record.sp_nam)
        self.assertIsInstance(record.sp_pwdp, str)
        self.assertEqual(record.sp_pwdp, record[1])
        self.assertEqual(record.sp_pwdp, record.sp_pwd)
        # The remaining seven fields are ints, mirrored at positions 2..8.
        int_fields = ('sp_lstchg', 'sp_min', 'sp_max', 'sp_warn',
                      'sp_inact', 'sp_expire', 'sp_flag')
        for position, attr in enumerate(int_fields, start=2):
            value = getattr(record, attr)
            self.assertIsInstance(value, int)
            self.assertEqual(value, record[position])
        # Unknown names raise KeyError with a fixed message.
        with self.assertRaises(KeyError) as cx:
            spwd.getspnam('invalid user name')
        self.assertEqual(str(cx.exception), "'getspnam(): name not found'")
        # Wrong arity and non-str arguments raise TypeError.
        self.assertRaises(TypeError, spwd.getspnam)
        self.assertRaises(TypeError, spwd.getspnam, 0)
        self.assertRaises(TypeError, spwd.getspnam, probe_name, 0)
        try:
            encoded_name = os.fsencode(probe_name)
        except UnicodeEncodeError:
            pass
        else:
            self.assertRaises(TypeError, spwd.getspnam, encoded_name)
@unittest.skipUnless(hasattr(os, 'geteuid') and os.geteuid() != 0,
                     'non-root user required')
class TestSpwdNonRoot(unittest.TestCase):
    """Checks that spwd denies access to unprivileged users."""

    def test_getspnam_exception(self):
        account = 'bin'
        try:
            with self.assertRaises(PermissionError) as cm:
                spwd.getspnam(account)
        except KeyError as exc:
            # The probe account may be absent on this system; skip then.
            self.skipTest("spwd entry %r doesn't exist: %s" % (account, exc))
# Fix: stripped dataset-extraction junk that had been fused onto this line.
if __name__ == "__main__":
    unittest.main()
class MiscellaneousLoads:
    def anpres(self, nfram="", delay="", ncycl="", refframe="", **kwargs):
        """Produce an animated sequence of the time-harmonic pressure
        variation of an engine-order excitation in a cyclic harmonic
        analysis.

        APDL Command: ANPRES

        Parameters
        ----------
        nfram
            Number of frame captures per cycle. Defaults to 3 times the number
            of sectors.
        delay
            Time delay (seconds) during animation. Defaults to 0.1 seconds.
        ncycl
            Number of animation cycles. Defaults to 5.
        refframe
            Reference frame for the model rotation.

            0 - Rotating reference frame (default). The model remains fixed in
            space and the pressures revolve around the model.

            1 - Stationary reference frame. The model rotates and the pressure
            locations remain fixed in space.

        Notes
        -----
        ANPRES invokes a macro which produces an animated sequence of the
        time-harmonic applied pressure in the case of a mode-superposition
        harmonic analysis (ANTYPE,HARMIC with CYCOPT,MSUP,ON). The
        engine-order excitation must also have been specified (CYCFREQ,EO).
        While pressure loads are not accepted as valid loading in a
        mode-superposition analysis (they must be applied in the modal
        analysis and the modal load vector applied in the mode-superposition
        analysis) you can apply them for the purposes of this animation.

        For RefFrame = 1 (stationary reference frame), the rotational
        velocity from the Linear Perturbation step, or the current OMEGA or
        CGOMGA value, is used to determine the rotation direction about the
        cyclic cylindrical axis, otherwise a positive rotation is assumed.

        You may use /HBC,,ON to hide overlapping pressure faces, and use
        /GLINE,,-1 to suppress the element outlines if desired.
        """
        command = f"ANPRES,{nfram},{delay},{ncycl},{refframe}"
        return self.run(command, **kwargs)
def aport(
self,
portnum="",
label="",
kcn="",
pres="",
phase="",
val1="",
val2="",
val3="",
val4="",
**kwargs,
):
"""Specifies input data for plane wave and acoustic duct ports.
APDL Command: APORT
Parameters
----------
portnum
Port number. This number is associated with an exterior port
or interior port previously specified by the SF and BF family
of commands, respectively. The number must be between 1 and
50.
Label
* ``"PLAN"`` : Incident plane wave.
* ``"RECT"`` : Rectangular duct.
* ``"CIRC"`` : Circular duct.
* ``"COAX"`` : Coaxial duct.
* ``"LIST"`` : List the port settings. If PortNum = ALL, list the port settings for all defined ports.
* ``"DELE"`` : Delete defined ports. If PortNum = ALL, delete all defined ports.
kcn
A previously-defined local (KCN >10) or global (KCN = 0)
Cartesian coordinate system number used to specify the
geometric properties of the duct. Defaults to the global
Cartesian coordinate system (0). The local Z-direction must be
the direction of wave propagation. The origin of the local
coordinate system must be centered about the face of the duct
port without considering symmetry.
pres
Zero-to-peak amplitude of the pressure. If blank, the port
will appear as a matching impedance.
phase
Phase angle of the applied pressure in degrees. Defaults to 0.
VAL1, VAL2, VAL3, VAL4
Additional input. The meaning of VAL1 through VAL4 varies
depending on the specified Label. If ``label="PLAN"``:
* ``"VAL1"`` : angle from positive X-axis to positive Y-axis
in the local Cartesian coordinates (KCN).
* ``"VAL2"`` : angle away from positive Z-axis in the local
Cartesian coordinates (KCN).
if ``label="RECT"``:
* ``"VAL1"`` : Width of the rectangular duct.
* ``"VAL2"`` : Height of the rectangular duct.
* ``"VAL3"`` : Mode index for pressure variation along the
width (defaults to 0).
* ``"VAL4"`` : Mode index for pressure variation along the
height (defaults to 0).
if ``label="CIRC"``:
* ``"VAL1"`` : Radius of the circular duct.
* ``"VAL2"`` : Not used.
* ``"VAL3"`` : Mode index for pressure variation along the
azimuth (defaults to 0).
* ``"VAL4"`` : Mode index for pressure variation along the
radii (defaults to 0).
if ``label="COAX"``:
* ``"VAL1"`` : Inner radius of the coaxial duct.
* ``"VAL2"`` : Outer radius of the coaxial duct.
* ``"VAL3"`` : Mode index for pressure variation along the
azimuth (defaults to 0).
* ``"VAL4"`` : Mode index for pressure variation along the
radii (defaults to 0).
Notes
-----
Use the APORT command to launch a specified analytic acoustic mode
into a guided duct.
The low-order FLUID30 element does not support the higher modes in
the coaxial duct ``label="COAX"``.
For more information, see Specified Mode Excitation in an Acoustic
Duct in the Acoustic Analysis Guide, and Analytic Port Modes in a
Duct in the Mechanical APDL Theory Reference.
"""
command = (
f"APORT,{portnum},{label},{kcn},{pres},{phase},,{val1},{val2},{val3},{val4}"
)
return self.run(command, **kwargs)
def asifile(
self, opt="", fname="", ext="", oper="", kdim="", kout="", limit="", **kwargs
):
"""Writes or reads one-way acoustic-structural coupling data.
APDL Command: ASIFILE
Parameters
----------
opt
Command behavior option:
WRITE - Write the structural results to the specified file.
READ - Read the structural results from the specified file.
fname
File name and directory path of a one-way acoustic-structural
coupling data file (248 characters maximum, including the
characters needed for the directory path). An unspecified directory
path defaults to the working directory; in this case, you can use
all 248 characters for the file name (defaults to jobname).
ext
File name extension of the one-way acoustic-structural coupling
data file (defaults to .asi).
oper
Command operation:
NOMAP - No mapping occurs between the structural and acoustic models when reading the
structural results from the specified file (default).
MAP - Maps the results from the structural to the acoustic model. (See "Notes".)
kdim
Interpolation criteria. Valid only when Oper = MAP.
kout
Outside region results. Valid only when Oper = MAP.
limit
Number of nearby nodes considered for interpolation. Valid only
when Oper = MAP.
Notes
-----
The ASIFILE command writes to, or reads from, a file containing one-way
acoustic-structural coupling data.
Results data on the one-way coupling interface (defined by the
SF,,FSIN) in the structural model are written to the one-way coupling
result data file during the structural solution.
One-way coupling results data are read into the acoustic model as the
velocity (harmonic) or acceleration (transient) excitation during the
sequential acoustic solution.
If Oper = NOMAP, both structural and acoustic models must share the
same node number on the one-way coupling interface.
If Oper = MAP:
The one-way coupling interface must be defined in the acoustic model
(SF,,FSIN) such that it corresponds to the field-surface interface
number (FSIN) in the structural model.
The output points are correct only if they are within the boundaries
set via the specified input points.
Calculations for out-of-bound points require much more processing time
than do points that are within bounds.
For each point in the acoustic destination mesh, the command searches
all possible triangles in the structural source mesh to find the best
triangle containing each point, then performs a linear interpolation
inside this triangle. For faster and more accurate results, consider
your interpolation method and search criteria carefully (see LIMIT).
One-way coupling excitation can be applied to multiple frequencies or
time steps.
"""
command = f"ASIFILE,{opt},{fname},{ext},{oper},{kdim},{kout},{limit}"
return self.run(command, **kwargs)
def awave(
self,
wavenum="",
wavetype="",
opt1="",
opt2="",
val1="",
val2="",
val3="",
val4="",
val5="",
val6="",
val7="",
val8="",
val9="",
val10="",
val11="",
val12="",
val13="",
**kwargs,
):
"""Specifies input data for an acoustic incident wave.
APDL Command: AWAVE
Parameters
----------
wavenum
Wave number. You specify the integer number for an acoustic
incident wave inside or outside the model. The number must be
between 1 and 20.
wavetype
Wave type:
PLAN - Planar incident wave
MONO - Monopole or pulsating sphere incident wave
DIPO - Dipole incident wave
BACK - Back enclosed loudspeaker
BARE - Bare loudspeaker
STATUS - Displays the status of the acoustic wave settings if Wavenum = a number between
1 and 20 or ALL.
DELE - Deletes the acoustic wave settings if Wavenum = a number between 1 and 20 or
ALL.
opt1
PRES
PRES - Pressure
VELO - Velocity
opt2
EXT
EXT - Incident wave outside the model.
INT - Incident wave inside the model. This option is only available for pure
scattered pressure formulation.
val1, val2, val3, . . . , val13
If Wavetype = PLAN, MONO, DIPO, BACK, or BARE:
VAL1 - Amplitude of pressure or normal velocity to the sphere surface.
VAL2 - Phase angle of the applied pressure or velocity (in degrees). Defaults to 0
degrees.
Notes
-----
Use the ASOL command to activate the scattered field algorithm and the
ASCRES command for output control with the scattered field algorithm.
Refer to Acoustics in the Mechanical APDL Theory Reference for more
information about pure scattered field formulation.
"""
command = f"AWAVE,{wavenum},{wavetype},{opt1},{opt2},{val1},{val2},{val3},{val4},{val5},{val6},{val7},{val8},{val9},{val10},{val11},{val12},{val13}"
return self.run(command, **kwargs)
def biot(self, label="", **kwargs):
"""Calculates the Biot-Savart source magnetic field intensity.
APDL Command: BIOT
Parameters
----------
label
Controls the Biot-Savart calculation:
NEW - Calculate the magnetic source field intensity (Hs) from the selected set of
source elements to the selected set of nodes. Overwrite any
existing Hs field values.
SUM - Calculate the Hs field from the selected set of source elements to the selected
set of nodes. Accumulate with any existing Hs field values.
Notes
-----
Calculates the Biot-Savart source magnetic field intensity (Hs) at the
selected nodes from the selected source elements. The calculation is
done at the time the BIOT command is issued.
Source elements include primitives described by element SOURC36, and
coupled-field elements SOLID5, LINK68, and SOLID98. Current conduction
elements do not have a solved-for current distribution from which to
calculate a source field until after the first substep. Inclusion of a
current conduction element Hs field will require a subsequent BIOT,SUM
command (with SOURC36 elements unselected) and a SOLVE command.
The units of Hs are as specified by the current EMUNIT command setting.
This command is also valid in PREP7.
"""
command = f"BIOT,{label}"
return self.run(command, **kwargs)
    def dfswave(
        self,
        kcn="",
        radius="",
        psdref="",
        dens="",
        sonic="",
        incang="",
        npara="",
        sampopt="",
        **kwargs,
    ):
        """Specify the incident planar waves with random phases for a
        diffuse sound field.

        APDL Command: DFSWAVE

        Parameters
        ----------
        kcn
            Local coordinate system:

            N - Coordinate system number. Default = 0.

            DELETE - Delete defined incident diffused planar waves.
        radius
            Radius of the reference sphere on which the incident planar waves
            are distributed with equal energy. Defaults to 50 x the half-
            maximum dimension of the structural panel.
        psdref
            Reference power spectral density. Default = 1.
        dens
            Mass density of incident planar wave media. Default = 2041 kg/m3.
            (NOTE(review): 2041 kg/m3 looks like a typo for 1.2041 kg/m3,
            the density of air -- confirm against the APDL reference.)
        sonic
            Sound speed in incident planar wave media. Default = 343.24 m/s.
        incang
            Maximum incident angle (0 <= degree <= 180) against the positive
            z axis in the local coordinate system KCN. Default = 0.
        npara
            Number of divisions on the reference sphere with cutting planes
            parallel to the x-y coordinate plane of the local coordinate
            system. Default = 20.
        sampopt
            Random sampling option:

            ALL - Initializes the random generator of incident planar wave
            phases and samples the phases at each solving frequency.

            MULT - Initializes the random generator of incident planar wave
            phases at the first frequency and samples the phases at each
            solving frequency.

            MONO - Initializes the random generator of incident planar wave
            phases and samples the phases only once at first solving
            frequency so that the same phases are used over the whole
            frequency range for each incident planar wave.

        Notes
        -----
        Issue the DFSWAVE command to activate a diffuse sound field. (The
        AWAVE command does not activate a diffuse sound field.)

        The SURF154 surface element must be defined on the surface of the
        structural solid element for the excitation.

        The acoustic elements and the absorbing boundary condition must be
        defined in the open acoustic domain. Do not define the acoustic
        domain on the excitation side.

        The PLST command calculates the average transmission loss for
        multiple sampling phases at each frequency over the frequency range.

        The symmetry of a panel structure cannot be used to reduce the
        simulation size, as the incident plane waves have varying random
        phase angles. The z axis of the Cartesian coordinate system (KCN)
        must be consistent with the panel's outward normal unit vector at
        the center of the panel's sending side.
        """
        command = (
            f"DFSWAVE,{kcn},{radius},{psdref},{dens},{sonic},{incang},{npara},{sampopt}"
        )
        return self.run(command, **kwargs)
def fluread(
    self, fname="", ext="", kdim="", kout="", limit="", listopt="", **kwargs
):
    """Read one-way Fluent-to-Mechanical APDL coupling data from a .cgns file.

    APDL Command: FLUREAD

    The Fluent one-side fast Fourier transformation (FFT) peak complex
    pressure values are mapped to the Mechanical APDL structural model
    during the acoustic-structural solution at each FFT frequency.

    Parameters
    ----------
    fname
        File name and directory path of the one-way coupling data file
        (248 characters maximum, including directory path). An
        unspecified path defaults to the working directory. Defaults to
        the jobname.
    ext
        File name extension. Defaults to .cgns.
    kdim
        Interpolation data for mapping. A value of 0 (default) or 2
        applies 2-D interpolation (interpolation occurs on a surface).
    kout
        Outside-region handling for mapping:

        0 - Use the value(s) of the nearest region point for points
            outside of the region (default).
        1 - Set results extrapolated outside of the region to zero.
    limit
        Number of nearby nodes considered for mapping interpolation.
        Minimum = 5. Default = 20.
    listopt
        Listing control: (blank) no listing (default); SOURCE, TARGET,
        or BOTH to list node coordinates and complex pressure values on
        the Fluent source side and/or the mapped Mechanical APDL target
        side during the solution.

    Notes
    -----
    Valid only for models containing acoustic elements. To apply the
    complex pressure to the structure, define SURF154 surface elements
    and the one-way coupling interface (SF,,FSIN) on them. The solving
    frequency range is set via HARFRQ; the number of substeps is
    determined by the number of FFT frequencies over that range and
    overrides any NSUBST setting. For mapping performance, note that
    out-of-bound points are much more expensive than in-bound points;
    tune the interpolation search via ``limit``.
    """
    # The first APDL field of FLUREAD is reserved ("--" in the command
    # reference), so an empty field must precede Fname.
    command = f"FLUREAD,,{fname},{ext},{kdim},{kout},{limit},{listopt}"
    return self.run(command, **kwargs)
def ic(self, node="", lab="", value="", value2="", nend="", ninc="", **kwargs):
    """Specify initial conditions at nodes.

    APDL Command: IC

    Sets the initial values of the specified degrees of freedom. Valid
    only for a static analysis and a full-method transient analysis
    (TIMINT,ON and TRNOPT,FULL); for transients the value applies at
    the beginning of the first load step (time = 0.0).

    Parameters
    ----------
    node
        Node at which the initial condition is specified. ALL applies
        it to every selected node (NSEL); a component name may be
        substituted.
    lab
        Degree-of-freedom label, or ALL for all appropriate labels.
    value
        First-order value of the degree of freedom. Defaults to the
        program default for that DOF (0.0 for structural, TUNIF for
        thermal, etc.). Values are in the nodal coordinate system,
        radians for rotational DOFs.
    value2
        Second-order value, mainly an initial structural velocity.
        Nodal coordinate system; radians/time for rotational DOFs.
    nend, ninc
        Apply the same values over the node range NODE to NEND
        (defaults to NODE) in steps of NINC (defaults to 1).

    Notes
    -----
    Initial conditions should always be step applied (KBC,1), not
    ramped. If a constraint (D, DSYM, etc.) and an initial condition
    exist at the same node, the constraint overrides. For thermal
    analyses, issue any TUNIF specification before IC. After a solve,
    the specified initial conditions are overwritten by the actual
    solution and must be respecified for a subsequent analysis.

    This command is also valid in PREP7.
    """
    fields = (node, lab, value, value2, nend, ninc)
    return self.run("IC," + ",".join(str(f) for f in fields), **kwargs)
def icdele(self, **kwargs):
    """Delete initial conditions at nodes.

    APDL Command: ICDELE

    Notes
    -----
    Deletes all initial conditions previously specified with the IC
    command at all nodes.

    This command is also valid in PREP7.
    """
    # Plain literal (the original used an f-string with no
    # placeholders); ICDELE takes no arguments, and the trailing comma
    # is kept for byte-compatibility with the generated command.
    command = "ICDELE,"
    return self.run(command, **kwargs)
def iclist(self, node1="", node2="", ninc="", lab="", **kwargs):
    """List the initial conditions.

    APDL Command: ICLIST

    Parameters
    ----------
    node1, node2, ninc
        List initial conditions for nodes NODE1 through NODE2 (defaults
        to NODE1) in steps of NINC (defaults to 1). If NODE1 = ALL
        (default), NODE2 and NINC are ignored and all selected nodes
        [NSEL] are listed. A component name may be substituted for
        NODE1 (NODE2 and NINC are then ignored).
    lab
        Velocity key: DISP lists first-order values (displacements,
        temperature, etc. — default); VELO lists second-order values
        (velocities).

    Notes
    -----
    Lists the initial conditions specified by the IC command for the
    selected nodes [NSEL] and DOF labels, including default conditions.
    Not the same as DLIST. Valid in any processor.
    """
    parts = ("ICLIST", str(node1), str(node2), str(ninc), str(lab))
    return self.run(",".join(parts), **kwargs)
def icrotate(
    self,
    node="",
    omega="",
    x1="",
    y1="",
    z1="",
    x2="",
    y2="",
    z2="",
    vx="",
    vy="",
    vz="",
    accel="",
    **kwargs,
):
    """Specify an initial nodal velocity as rotation about an axis plus translation.

    APDL Command: ICROTATE

    Parameters
    ----------
    node
        Node at which the initial velocity is specified. ALL applies
        it to all selected nodes (NSEL); a component name is accepted.
    omega
        Scalar rotational velocity about the rotational axis.
    x1, y1, z1
        Global Cartesian coordinates of the beginning point of the
        rotational axis vector.
    x2, y2, z2
        Global Cartesian coordinates of the end point of the rotational
        axis vector.
    vx, vy, vz
        Initial translational velocity components along the x, y, and z
        directions of the nodal coordinate system.
    accel
        Key to initialize acceleration due to centrifugal effects:
        ``""`` (blank, default) does not initialize acceleration;
        ``"CENT"`` initializes it along with the initial velocity.

    Notes
    -----
    Sets the initial velocity for all translational degrees of freedom
    of the specified nodes as the sum of a rotation about an axis and a
    translation.
    """
    fields = (node, omega, x1, y1, z1, x2, y2, z2, vx, vy, vz, accel)
    return self.run("ICROTATE," + ",".join(str(f) for f in fields), **kwargs)
def mrpm(self, val1="", **kwargs):
    """Define the revolutions per minute (RPM) for a machine rotation.

    APDL Command: MRPM

    Parameters
    ----------
    val1
        The RPM value (no default).

    Notes
    -----
    A different RPM value can be defined at each load step. The value
    is used to postprocess the equivalent radiated power from the
    structural surface (PRAS/PLAS) or the radiated sound power level
    (PRFAR/PLFAR).
    """
    command = "MRPM," + str(val1)
    return self.run(command, **kwargs)
def outpr(self, item="", freq="", cname="", **kwargs):
    """Control the solution printout.

    APDL Command: OUTPR

    Parameters
    ----------
    item
        Item for print control: BASIC (default — nodal DOF solution,
        nodal reaction loads, and element solution), NSOL, RSOL, ESOL,
        NLOAD, SFOR, VENG, V (nodal velocity, transient only), A (nodal
        acceleration, transient only), or ALL.
    freq
        Print the item every Freq-th (and the last) substep of each
        load step. If -n, print up to n equally spaced solutions (static
        or full transient with automatic time stepping only). NONE
        suppresses printout for this item; ALL prints every substep;
        LAST prints only the last substep of each load step. For a
        modal analysis, use NONE or ALL.
    cname
        Name of a component (CM command) restricting the specification
        to a selected set of nodes or elements. Blank means all
        entities.

    Notes
    -----
    Specifications are processed in input order; up to 50 OUTPR/OUTRES
    specifications may be defined. Use OUTPR,STAT to list the current
    specifications and OUTPR,ERASE to erase them. Result printouts for
    interactive sessions are suppressed for models with more than 10
    elements.

    This command is also valid in PREP7.
    """
    pieces = ("OUTPR", str(item), str(freq), str(cname))
    return self.run(",".join(pieces), **kwargs)
def outres(self, item="", freq="", cname="", nsvar="", dsubres="", **kwargs):
    """Control the solution data written to the database.

    APDL Command: OUTRES

    Parameters
    ----------
    item
        Results item for database and file write control: ALL (default,
        everything except LOCI and SVAR), CINT, ERASE, STAT, BASIC,
        NSOL, RSOL, V, A, ESOL, NLOAD, STRS, EPEL, EPTH, EPPL, EPCR,
        EPDI, FGRAD, FFLUX, LOCI, SVAR, or MISC.
    freq
        Specifies how often (at which substeps) to write the item.
    cname
        Name of a component (CM command) defining the selected set of
        elements or nodes for which this specification is active. Blank
        means all entities. Not allowed with ALL, BASIC, or RSOL.
    nsvar
        Number of user-defined state variables (TB,STATE) to write.
        Valid only when Item = SVAR; defaults to all defined state
        variables.
    dsubres
        Additional results written to Jobname.DSUB during a
        substructure or CMS use pass (transient/harmonic): blank
        (default) writes the nodal DOF solution only; ALL also writes
        the data needed to compute velocity/acceleration-based
        quantities in the expansion pass.

    Notes
    -----
    OUTRES controls which solution item is written, the frequency at
    which it is written (static, transient, or full harmonic analyses),
    and the set of elements or nodes to which the specification
    applies.
    """
    # APDL's OUTRES has a reserved field between Cname and NSVAR
    # ("--" in the command reference), hence the double comma; without
    # it NSVAR and DSUBres land in the wrong argument positions.
    command = f"OUTRES,{item},{freq},{cname},,{nsvar},{dsubres}"
    return self.run(command, **kwargs)
def rescontrol(self, action="", ldstep="", frequency="", maxfiles="", **kwargs):
    """Control file writing for multiframe restarts.

    APDL Command: RESCONTROL

    Parameters
    ----------
    action
        Command action: DEFINE (default — specify how often .Xnnn
        restart files are written for a load step), FILE_SUMMARY (print
        substep/load step info for all .Xnnn files), STATUS (list
        current restart controls), NORESTART (clean up restart files
        after a Distributed ANSYS solution), LINEAR (same as DEFINE but
        intended for linear static applications), or DELETE (delete the
        restart control specification for the Ldstep label).
    ldstep
        How the .Xnnn files are written for the specified load steps:
        ALL, LAST (default for nonlinear static and full transient
        analyses), N (a specific load step), -N (every Nth load step),
        or NONE (no multiframe restart files; default for
        mode-superposition analyses).
    frequency
        Substep-level frequency for writing .Xnnn files: NONE, LAST
        (default for nonlinear static and full transient), or N
        (every Nth substep if positive; N equally spaced files per
        load step if negative).
    maxfiles
        Maximum number of .Xnnn files to save for Ldstep: -1 (default)
        overwrites existing files once the 999-file maximum is reached;
        0 never overwrites, and file writing simply stops at 999.

    Notes
    -----
    Also affects how often load-history information is written to the
    .LDHI file. See the APDL command reference for option interactions
    (e.g. -N overriding other Ldstep options).
    """
    fields = (action, ldstep, frequency, maxfiles)
    return self.run("RESCONTROL," + ",".join(str(f) for f in fields), **kwargs)
def sbclist(self, **kwargs):
    """List solid model boundary conditions.

    APDL Command: SBCLIST

    Notes
    -----
    Lists all solid model boundary conditions for the selected solid
    model entities. See also DKLIST, DLLIST, DALIST, FKLIST, SFLLIST,
    SFALIST, BFLLIST, BFALIST, BFVLIST, and BFKLIST to list items
    separately.

    This command is valid in any processor.
    """
    # Plain literal (the original used an f-string with no
    # placeholders); the trailing comma is kept for byte-compatibility.
    command = "SBCLIST,"
    return self.run(command, **kwargs)
def sbctran(self, **kwargs):
    """Transfer solid model loads and boundary conditions to the FE model.

    APDL Command: SBCTRAN

    Notes
    -----
    Causes a manual transfer of solid model loads and boundary
    conditions to the finite element model. Loads and boundary
    conditions on unselected keypoints, lines, areas, and volumes are
    not transferred, nor are they transferred to unselected nodes or
    elements. This operation is also done automatically when the
    solution starts [SOLVE].

    This command is also valid in PREP7.
    """
    # Plain literal (the original used an f-string with no
    # placeholders); the trailing comma is kept for byte-compatibility.
    command = "SBCTRAN,"
    return self.run(command, **kwargs)
def wsprings(self, **kwargs):
    """Create weak springs on corner nodes of a bounding box of the
    currently selected elements.

    APDL Command: WSPRINGS

    Notes
    -----
    WSPRINGS invokes a predefined ANSYS macro used during the import of
    loads from the ADAMS program. It creates weak springs on the corner
    nodes of the bounding box of the currently selected elements; the
    six nodes of the bounding box are attached to ground using COMBIN14
    elements with a small stiffness (adjustable via the COMBIN14 real
    constants). Works only for models with a geometric extension in two
    or three dimensions; one-dimensional problems (pure beam in one
    axis) are not supported.

    Distributed ANSYS Restriction: This command is not supported in
    Distributed ANSYS.
    """
    # Plain literal (the original used an f-string with no
    # placeholders); the trailing comma is kept for byte-compatibility.
    command = "WSPRINGS,"
    return self.run(command, **kwargs)
def anpres(self, nfram="", delay="", ncycl="", refframe="", **kwargs):
    """Animate the time-harmonic pressure variation of an engine-order
    excitation in a cyclic harmonic analysis.

    APDL Command: ANPRES

    Parameters
    ----------
    nfram
        Number of frame captures per cycle. Defaults to 3 times the
        number of sectors.
    delay
        Time delay (seconds) during animation. Defaults to 0.1 seconds.
    ncycl
        Number of animation cycles. Defaults to 5.
    refframe
        Reference frame for the model rotation: 0 (default) rotating
        reference frame — the model stays fixed and the pressure
        revolves around it; 1 stationary reference frame — the model
        rotates and the pressure locations stay fixed.

    Notes
    -----
    Invokes a macro that animates the time-harmonic applied pressure in
    a mode-superposition harmonic analysis (ANTYPE,HARMIC with
    CYCOPT,MSUP,ON); the engine-order excitation must have been
    specified (CYCFREQ,EO). For RefFrame = 1, the rotational velocity
    from the linear perturbation step, or the current OMEGA or CGOMGA
    value, determines the rotation direction about the cyclic
    cylindrical axis. Use /HBC,,ON to hide overlapping pressure faces
    and /GLINE,,-1 to suppress element outlines if desired.
    """
    fields = (nfram, delay, ncycl, refframe)
    return self.run("ANPRES," + ",".join(str(f) for f in fields), **kwargs)
def aport(
    self,
    portnum="",
    label="",
    kcn="",
    pres="",
    phase="",
    val1="",
    val2="",
    val3="",
    val4="",
    **kwargs,
):
    """Specify input data for plane wave and acoustic duct ports.

    APDL Command: APORT

    Parameters
    ----------
    portnum
        Port number (1-50), associated with an exterior or interior
        port previously specified by the SF and BF family of commands,
        respectively.
    label
        ``"PLAN"`` incident plane wave; ``"RECT"`` rectangular duct;
        ``"CIRC"`` circular duct; ``"COAX"`` coaxial duct; ``"LIST"``
        list port settings (PortNum = ALL lists all); ``"DELE"`` delete
        ports (PortNum = ALL deletes all).
    kcn
        Local (KCN > 10) or global (KCN = 0, default) Cartesian
        coordinate system number describing the duct geometry. The
        local Z-direction must be the direction of wave propagation,
        with the origin centered about the face of the duct port
        without considering symmetry.
    pres
        Zero-to-peak amplitude of the pressure. If blank, the port
        appears as a matching impedance.
    phase
        Phase angle of the applied pressure in degrees. Defaults to 0.
    val1, val2, val3, val4
        Label-dependent extra input. PLAN: VAL1/VAL2 are the angles
        from the positive X-axis toward Y and away from the positive
        Z-axis in KCN. RECT: width, height, and the two mode indices
        (default 0). CIRC: radius, (unused), azimuthal and radial mode
        indices (default 0). COAX: inner radius, outer radius,
        azimuthal and radial mode indices (default 0).

    Notes
    -----
    Launches a specified analytic acoustic mode into a guided duct. The
    low-order FLUID30 element does not support the higher modes in the
    coaxial duct (``label="COAX"``).
    """
    # The empty entry between phase and val1 preserves APORT's unused
    # APDL field (the ",," in the generated command).
    fields = (portnum, label, kcn, pres, phase, "", val1, val2, val3, val4)
    return self.run("APORT," + ",".join(str(f) for f in fields), **kwargs)
def asifile(
    self, opt="", fname="", ext="", oper="", kdim="", kout="", limit="", **kwargs
):
    """Write or read one-way acoustic-structural coupling data.

    APDL Command: ASIFILE

    Parameters
    ----------
    opt
        Command behavior option: WRITE (write the structural results to
        the specified file) or READ (read them from it).
    fname
        File name and directory path of the coupling data file (248
        characters maximum, including directory path). An unspecified
        path defaults to the working directory. Defaults to the
        jobname.
    ext
        File name extension (defaults to .asi).
    oper
        Command operation: NOMAP (default — no mapping between the
        structural and acoustic models when reading) or MAP (map the
        results from the structural to the acoustic model).
    kdim
        Interpolation criteria. Valid only when Oper = MAP.
    kout
        Outside-region results. Valid only when Oper = MAP.
    limit
        Number of nearby nodes considered for interpolation. Valid only
        when Oper = MAP.

    Notes
    -----
    Results on the one-way coupling interface (SF,,FSIN) in the
    structural model are written during the structural solution, then
    read into the acoustic model as velocity (harmonic) or acceleration
    (transient) excitation in the sequential acoustic solution. With
    Oper = NOMAP, both models must share node numbers on the interface;
    with Oper = MAP, the interface in the acoustic model must match the
    field-surface interface number (FSIN) of the structural model.
    One-way coupling excitation can be applied to multiple frequencies
    or time steps.
    """
    fields = (opt, fname, ext, oper, kdim, kout, limit)
    return self.run("ASIFILE," + ",".join(str(f) for f in fields), **kwargs)
def awave(
    self,
    wavenum="",
    wavetype="",
    opt1="",
    opt2="",
    val1="",
    val2="",
    val3="",
    val4="",
    val5="",
    val6="",
    val7="",
    val8="",
    val9="",
    val10="",
    val11="",
    val12="",
    val13="",
    **kwargs,
):
    """Specify input data for an acoustic incident wave.

    APDL Command: AWAVE

    Parameters
    ----------
    wavenum
        Integer number (1 to 20) identifying the acoustic incident wave
        inside or outside the model.
    wavetype
        Wave type: PLAN (planar), MONO (monopole/pulsating sphere),
        DIPO (dipole), BACK (back enclosed loudspeaker), BARE (bare
        loudspeaker), STATUS (display the settings for Wavenum = 1-20
        or ALL), or DELE (delete the settings for Wavenum = 1-20 or
        ALL).
    opt1
        PRES for pressure or VELO for velocity excitation.
    opt2
        EXT for an incident wave outside the model (default); INT for
        an incident wave inside the model (available only for the pure
        scattered pressure formulation).
    val1, val2, val3, . . . , val13
        Wave-type dependent values. For Wavetype = PLAN, MONO, DIPO,
        BACK, or BARE: VAL1 is the amplitude of pressure or normal
        velocity to the sphere surface and VAL2 is the phase angle of
        the applied pressure or velocity in degrees (defaults to 0).

    Notes
    -----
    Use the ASOL command to activate the scattered field algorithm and
    the ASCRES command for output control with the scattered field
    algorithm. Refer to Acoustics in the Mechanical APDL Theory
    Reference for more information about the pure scattered field
    formulation.
    """
    # Join all arguments positionally; empty strings leave the matching
    # APDL fields blank, producing the same string as the f-string form.
    fields = (wavenum, wavetype, opt1, opt2, val1, val2, val3, val4,
              val5, val6, val7, val8, val9, val10, val11, val12, val13)
    command = "AWAVE," + ",".join(str(field) for field in fields)
    return self.run(command, **kwargs)
def biot(self, label="", **kwargs):
    """Calculate the Biot-Savart source magnetic field intensity.

    APDL Command: BIOT

    Parameters
    ----------
    label
        Controls the calculation: NEW computes the magnetic source
        field intensity (Hs) from the selected source elements at the
        selected nodes, overwriting any existing Hs values; SUM
        computes the same field but accumulates it with any existing
        Hs values.

    Notes
    -----
    The calculation is performed at the time the BIOT command is
    issued. Source elements include SOURC36 primitives and the
    coupled-field elements SOLID5, LINK68, and SOLID98. Current
    conduction elements have no solved-for current distribution until
    after the first substep; including their Hs field requires a
    subsequent BIOT,SUM (with SOURC36 elements unselected) and a SOLVE.
    The units of Hs follow the current EMUNIT setting. This command is
    also valid in PREP7.
    """
    return self.run(f"BIOT,{label}", **kwargs)
def dfswave(
    self,
    kcn="",
    radius="",
    psdref="",
    dens="",
    sonic="",
    incang="",
    npara="",
    sampopt="",
    **kwargs,
):
    """Specify incident planar waves with random phases for a diffuse sound field.

    APDL Command: DFSWAVE

    Parameters
    ----------
    kcn
        Local coordinate system number (default 0), or DELETE to remove
        previously defined incident diffuse planar waves.
    radius
        Radius of the reference sphere on which the incident planar
        waves are distributed with equal energy. Defaults to 50 x the
        half-maximum dimension of the structural panel.
    psdref
        Reference power spectral density (default 1).
    dens
        Mass density of the incident planar wave media
        (default 2041 kg/m3).
    sonic
        Sound speed in the incident planar wave media
        (default 343.24 m/s).
    incang
        Maximum incident angle (0 <= degree <= 180) against the
        positive z axis of local coordinate system KCN (default 0).
    npara
        Number of divisions on the reference sphere with cutting planes
        parallel to the x-y plane of the local coordinate system
        (default 20).
    sampopt
        Random sampling option: ALL (initialize the random phase
        generator and sample at each solving frequency), MULT
        (initialize at the first frequency, sample at each frequency),
        or MONO (sample only once at the first solving frequency and
        reuse the phases over the whole range).

    Notes
    -----
    Issue DFSWAVE to activate a diffuse sound field (the AWAVE command
    does not). The SURF154 surface element must be defined on the
    surface of the structural solid element for the excitation. The
    acoustic elements and the absorbing boundary condition must be
    defined in the open acoustic domain; do not define the acoustic
    domain on the excitation side. The PLST command calculates the
    average transmission loss for multiple sampling phases at each
    frequency. Panel symmetry cannot be used to reduce the simulation
    size because the incident plane waves carry varying random phase
    angles; the z axis of Cartesian coordinate system KCN must be
    consistent with the panel's outward normal unit vector at the
    center of its sending side.
    """
    args = (kcn, radius, psdref, dens, sonic, incang, npara, sampopt)
    return self.run("DFSWAVE," + ",".join(str(arg) for arg in args), **kwargs)
def fluread(
    self, fname="", ext="", kdim="", kout="", limit="", listopt="", **kwargs
):
    """Read one-way Fluent-to-Mechanical APDL coupling data from a .cgns file.

    APDL Command: FLUREAD

    The file holds one-side fast Fourier transformation (FFT) complex
    pressure peak values, which are mapped to the Mechanical APDL
    structure model during the acoustic-structural solution at each
    FFT frequency.

    Parameters
    ----------
    fname
        File name and directory path of the coupling data file
        (248 characters maximum, including the directory path). An
        unspecified path defaults to the working directory. Defaults
        to the jobname.
    ext
        File name extension (defaults to .cgns).
    kdim
        Interpolation data for mapping; 0 (default) or 2 applies 2-D
        interpolation (interpolation occurs on a surface).
    kout
        Outside-region handling for mapping: 0 (default) uses the
        value(s) of the nearest region point; 1 sets results
        extrapolated outside the region to zero.
    limit
        Number of nearby nodes considered for mapping interpolation.
        Minimum = 5, default = 20.
    listopt
        Listing control: blank (default, no listing), SOURCE, TARGET,
        or BOTH to list node coordinates and complex pressure values
        on the Fluent source side and/or the mapped Mechanical APDL
        target side during the solution.

    Notes
    -----
    The command can be used only for models with acoustic elements.
    To apply complex pressure to the structure model, define the
    SURF154 surface element, then define the one-way coupling
    interface (SF,,FSIN) on it. The solving frequency range (HARFRQ)
    determines which FFT frequencies are selected; the resulting
    number of substeps overrides any NSUBST setting. Out-of-bound
    points cost much more processing time than in-bound points; for
    each structural destination point the best containing triangle of
    the Fluent source mesh is found and linearly interpolated, so
    choose the interpolation method and search criteria (LIMIT)
    carefully.
    """
    args = (fname, ext, kdim, kout, limit, listopt)
    return self.run("FLUREAD," + ",".join(str(arg) for arg in args), **kwargs)
def ic(self, node="", lab="", value="", value2="", nend="", ninc="", **kwargs):
    """Specify initial conditions at nodes.

    APDL Command: IC

    Parameters
    ----------
    node
        Node at which the initial condition applies. ALL applies it to
        all selected nodes (NSEL); a component name may be substituted.
    lab
        Degree-of-freedom label; ALL uses all appropriate labels.
    value
        First-order value of the degree of freedom. Defaults to the
        program default for that degree of freedom (0.0 for structural
        analysis, TUNIF for thermal analysis, etc.). Values are in the
        nodal coordinate system, radians for rotational degrees of
        freedom.
    value2
        Second-order value, mainly used to specify an initial
        structural velocity (radians/time for rotational degrees of
        freedom). Defaults to the program default.
    nend, ninc
        Apply the same initial condition to the range of nodes from
        NODE to NEND (defaults to NODE) in steps of NINC (defaults
        to 1).

    Notes
    -----
    Valid only for static analyses and full-method transient analyses
    (TIMINT,ON and TRNOPT,FULL); for transients the value applies at
    time = 0.0. Initial conditions should always be step applied
    (KBC,1), not ramped. When constraints (D, DSYM, etc.) and initial
    conditions exist at the same node, the constraint overrides. For
    thermal analyses, apply any TUNIF before IC, otherwise the TUNIF
    specification is ignored (use ICDELE and reissue if needed).
    Define consistent initial conditions: defining a velocity at a
    single degree of freedom leaves every other one at 0.0, which can
    conflict. After a solution, the specified values are overwritten
    by the actual solution and must be respecified for subsequent
    analyses. CDWRITE archives only second-order (velocity) terms,
    not first-order values. This command is also valid in PREP7.
    """
    args = (node, lab, value, value2, nend, ninc)
    return self.run("IC," + ",".join(str(arg) for arg in args), **kwargs)
def icdele(self, **kwargs):
    """Delete all initial conditions previously specified with IC.

    APDL Command: ICDELE

    Notes
    -----
    Deletes the initial conditions at all nodes. This command is also
    valid in PREP7.
    """
    # Trailing comma kept so the emitted command matches the original.
    return self.run("ICDELE,", **kwargs)
def iclist(self, node1="", node2="", ninc="", lab="", **kwargs):
    """List the initial conditions specified by the IC command.

    APDL Command: ICLIST

    Parameters
    ----------
    node1, node2, ninc
        List initial conditions for nodes NODE1 through NODE2
        (defaults to NODE1) in steps of NINC (defaults to 1). If
        NODE1 = ALL (default), NODE2 and NINC are ignored and all
        selected nodes [NSEL] are listed; a component name may be
        substituted for NODE1 (NODE2 and NINC are then ignored).
    lab
        Velocity key: DISP (default) lists first-order values
        (displacements, temperature, etc.); VELO lists second-order
        values (velocities).

    Notes
    -----
    Listing applies to all selected nodes [NSEL] and DOF labels and
    includes the default conditions; it is not the same as DLIST.
    Valid in any processor.
    """
    args = (node1, node2, ninc, lab)
    return self.run("ICLIST," + ",".join(str(arg) for arg in args), **kwargs)
def icrotate(
    self,
    node="",
    omega="",
    x1="",
    y1="",
    z1="",
    x2="",
    y2="",
    z2="",
    vx="",
    vy="",
    vz="",
    accel="",
    **kwargs,
):
    """Specify initial nodal velocity as rotation about an axis plus translation.

    APDL Command: ICROTATE

    Parameters
    ----------
    node
        Node at which the initial velocity applies. ALL applies it to
        all selected nodes (NSEL); a component name may be input.
    omega
        Scalar rotational velocity about the rotational axis.
    x1, y1, z1
        Global Cartesian coordinates of the beginning point of the
        rotational axis vector.
    x2, y2, z2
        Global Cartesian coordinates of the end point of the
        rotational axis vector.
    vx, vy, vz
        Initial translational velocity in directions x, y, and z of
        the nodal coordinate system.
    accel
        Blank (default) does not initialize acceleration; CENT also
        initializes acceleration due to centrifugal effects along with
        the initial velocity.

    Notes
    -----
    The command sets the initial velocity for all translational
    degrees of freedom of the specified nodes; the value is the
    combination of the velocity due to rotation about the axis and
    the translation.
    """
    args = (node, omega, x1, y1, z1, x2, y2, z2, vx, vy, vz, accel)
    return self.run("ICROTATE," + ",".join(str(arg) for arg in args), **kwargs)
def mrpm(self, val1="", **kwargs):
    """Define the revolutions per minute (RPM) for a machine rotation.

    APDL Command: MRPM

    Parameters
    ----------
    val1
        The RPM value (no default).

    Notes
    -----
    A different RPM value can be defined at each load step. The value
    is used to postprocess the equivalent radiated power from the
    structural surface (PRAS and PLAS commands) or the radiated sound
    power level (PRFAR and PLFAR commands).
    """
    command = f"MRPM,{val1}"
    return self.run(command, **kwargs)
def outpr(self, item="", freq="", cname="", **kwargs):
    """Control the solution printout.

    APDL Command: OUTPR

    Parameters
    ----------
    item
        Item for print control: BASIC (default; nodal DOF solution,
        nodal reaction loads, and element solution), NSOL, RSOL, ESOL,
        NLOAD (includes stabilization force/moments when nonlinear
        stabilization is active), SFOR, VENG, V (nodal velocity,
        structural transient only), A (nodal acceleration, structural
        transient only), or ALL.
    freq
        Print the item every Freqth (and the last) substep of each
        load step. -n prints up to n equally spaced solutions (static
        or full transient with automatic time stepping); NONE
        suppresses printout for the item; ALL prints every substep;
        LAST prints only the last substep of each load step. For a
        modal analysis, use NONE or ALL.
    cname
        Name of a component (CM) defining the node/element set the
        specification applies to; blank means all entities.

    Notes
    -----
    Each item is associated with either nodes (NSOL, RFORCE, V, A) or
    elements (the rest). Specifications are processed in input order;
    up to 50 (OUTPR and OUTRES combined) may be defined. OUTPR,STAT
    lists and OUTPR,ERASE clears the current specifications. To
    restrict printout, selectively suppress items (Freq = NONE) or
    suppress everything (OUTPR,ALL,NONE) and re-enable items with
    subsequent OUTPR commands. Nodal reaction loads (RSOL) follow the
    PRRSOL specifications. Interactive-session printouts are
    suppressed for models with more than 10 elements. Also valid in
    PREP7.
    """
    args = (item, freq, cname)
    return self.run("OUTPR," + ",".join(str(arg) for arg in args), **kwargs)
def outres(self, item="", freq="", cname="", nsvar="", dsubres="", **kwargs):
    """Control the solution data written to the database.

    APDL Command: OUTRES

    Parameters
    ----------
    item
        Results item for database and file write control: ALL
        (default; everything except LOCI and SVAR), CINT, ERASE
        (reset specifications to defaults), STAT (list current
        specifications), BASIC (NSOL, RSOL, NLOAD, STRS, FGRAD, FFLUX
        only), NSOL, RSOL, V, A, ESOL, NLOAD, STRS, EPEL, EPTH, EPPL,
        EPCR, EPDI, FGRAD, FFLUX, LOCI, SVAR, or MISC.
    freq
        Specifies how often (at which substeps) to write the item.
    cname
        Name of a component (CM) defining the element/node set the
        specification applies to; blank means all entities. A
        component name is not allowed with ALL, BASIC, or RSOL.
    nsvar
        Number of user-defined state variables (TB,STATE) to write to
        the results file. Valid only when Item = SVAR; cannot exceed
        the number of state variables defined; defaults to all of
        them.
    dsubres
        Blank (default) writes only the nodal DOF solution in
        Jobname.DSUB during a substructure/CMS use pass; ALL also
        writes the data needed to compute velocity/acceleration-based
        quantities (damping force, inertial force, kinetic energy,
        etc.) in the subsequent expansion pass.

    Notes
    -----
    OUTRES controls which solution item is written to the database
    (and to the reduced displacement and results files), how often it
    is written (static, transient, or full harmonic analyses), and
    which element/node set the specification applies to.
    """
    args = (item, freq, cname, nsvar, dsubres)
    return self.run("OUTRES," + ",".join(str(arg) for arg in args), **kwargs)
def rescontrol(self, action="", ldstep="", frequency="", maxfiles="", **kwargs):
    """Control file writing for multiframe restarts.

    APDL Command: RESCONTROL

    Parameters
    ----------
    action
        DEFINE (default) specifies how frequently the .Xnnn restart
        files are written for a load step. FILE_SUMMARY prints substep
        and load step information for all .Xnnn files of the current
        jobname. STATUS lists the restart controls previously set by
        RESCONTROL. NORESTART cleans up restart files after a
        Distributed ANSYS solution (in shared-memory ANSYS it acts
        like RESCONTROL,,NONE). LINEAR behaves like DEFINE but is
        intended for linear static applications (e.g. ahead of a
        linear perturbation analysis, since no restart files are
        written for linear statics by default). DELETE removes the
        specification matching the Ldstep label of a previous
        RESCONTROL,DEFINE. For FILE_SUMMARY, STATUS, and NORESTART
        the remaining arguments are ignored.
    ldstep
        Load steps for which .Xnnn files (and .LDHI load history) are
        written: ALL (every load step), LAST (last load step only;
        default for nonlinear static and full transient analyses), a
        positive N (load step N only), a negative -N (every Nth load
        step; useful when many hundreds of load steps are needed and
        overriding other Ldstep options), or NONE (no multiframe
        restart files at all; default for mode-superposition
        analyses).
    frequency
        Substep frequency of the .Xnnn files: NONE (no files for this
        load step), LAST (last substep only; default for nonlinear
        static and full transient analyses), a positive N (every Nth
        substep), or a negative N (N equally spaced files per load
        step; for nonlinear static and full transient analyses this
        requires AUTOTS,ON, while for mode-superposition analyses it
        is always valid).
    maxfiles
        Maximum number of .Xnnn files to save for Ldstep: -1 (default)
        overwrites the oldest files once the 999-file limit is
        reached; 0 keeps existing files and simply stops writing new
        ones at the limit.
    """
    args = (action, ldstep, frequency, maxfiles)
    return self.run("RESCONTROL," + ",".join(str(arg) for arg in args), **kwargs)
def sbclist(self, **kwargs):
    """List solid model boundary conditions.

    APDL Command: SBCLIST

    Notes
    -----
    Lists all solid model boundary conditions for the selected solid
    model entities. See also DKLIST, DLLIST, DALIST, FKLIST, SFLLIST,
    SFALIST, BFLLIST, BFALIST, BFVLIST, and BFKLIST to list items
    separately. Valid in any processor.
    """
    # Trailing comma kept so the emitted command matches the original.
    return self.run("SBCLIST,", **kwargs)
def sbctran(self, **kwargs):
    """Transfer solid model loads and boundary conditions to the FE model.

    APDL Command: SBCTRAN

    Notes
    -----
    Performs a manual transfer of solid model loads and boundary
    conditions to the finite element model. Loads and boundary
    conditions on unselected keypoints, lines, areas, and volumes are
    not transferred, and nothing is transferred to unselected nodes or
    elements. The transfer also happens automatically when the
    solution calculations start [SOLVE]. Also valid in PREP7.
    """
    return self.run("SBCTRAN,", **kwargs)
def wsprings(self, **kwargs):
    """Create weak springs on the corner nodes of the selected elements' bounding box.

    APDL Command: WSPRINGS

    Notes
    -----
    WSPRINGS invokes a predefined ANSYS macro used during the import
    of loads from the ADAMS program into ANSYS. The corner nodes of
    the bounding box of the currently selected elements are attached
    to ground using COMBIN14 elements; the stiffness is a small number
    that can be changed through the COMBIN14 real constants. The
    command works only for models with a geometric extension in two or
    three dimensions — one-dimensional problems (pure beam in one
    axis) are not supported. See Import Loads into ANSYS in the
    Substructuring Analysis Guide for details.

    Distributed ANSYS Restriction: this command is not supported in
    Distributed ANSYS.
    """
    return self.run("WSPRINGS,", **kwargs)
import asyncio
import logging
from collections import OrderedDict
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
DOMAIN, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_NAME, STATE_OFF, STATE_ON, EVENT_HOMEASSISTANT_STOP)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)

# Configuration key for the device's Songpal control-API endpoint URL.
CONF_ENDPOINT = 'endpoint'

# Parameter names accepted by the songpal_set_sound_setting service.
PARAM_NAME = 'name'
PARAM_VALUE = 'value'

# Key under hass.data where known devices are stored, keyed by endpoint.
PLATFORM = 'songpal'
# Name of the custom service registered by this platform.
SET_SOUND_SETTING = 'songpal_set_sound_setting'

# Feature bitmask advertised for these media player entities.
SUPPORT_SONGPAL = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
    SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE | \
    SUPPORT_TURN_ON | SUPPORT_TURN_OFF

# Platform schema: the endpoint is required, a friendly name optional.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
    vol.Optional(CONF_NAME): cv.string,
    vol.Required(CONF_ENDPOINT): cv.string,
})

# Schema for songpal_set_sound_setting calls; when entity_id is omitted
# the handler below applies the setting to every known device.
SET_SOUND_SCHEMA = vol.Schema({
    vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
    vol.Required(PARAM_NAME): cv.string,
    vol.Required(PARAM_VALUE): cv.string,
})
async def async_setup_platform(
        hass, config, async_add_entities, discovery_info=None):
    """Set up the Songpal platform.

    Handles both discovery (discovery_info) and YAML configuration
    (CONF_ENDPOINT). A device is added only once per endpoint, and the
    songpal_set_sound_setting service is registered for the platform.
    """
    from songpal import SongpalException
    if PLATFORM not in hass.data:
        hass.data[PLATFORM] = {}
    if discovery_info is not None:
        name = discovery_info["name"]
        endpoint = discovery_info["properties"]["endpoint"]
        _LOGGER.debug("Got autodiscovered %s - endpoint: %s", name, endpoint)
        device = SongpalDevice(name, endpoint)
    else:
        name = config.get(CONF_NAME)
        endpoint = config.get(CONF_ENDPOINT)
        device = SongpalDevice(name, endpoint, poll=False)
    # Skip endpoints that were already set up (e.g. configured in YAML
    # and then also autodiscovered).
    if endpoint in hass.data[PLATFORM]:
        _LOGGER.debug("The endpoint exists already, skipping setup.")
        return
    try:
        await device.initialize()
    except SongpalException as ex:
        _LOGGER.error("Unable to get methods from songpal: %s", ex)
        raise PlatformNotReady
    hass.data[PLATFORM][endpoint] = device
    async_add_entities([device], True)

    async def async_service_handler(service):
        """Handle songpal_set_sound_setting service calls."""
        # Use the shared constant instead of a hard-coded "entity_id"
        # key so the handler stays consistent with SET_SOUND_SCHEMA.
        entity_id = service.data.get(ATTR_ENTITY_ID)
        params = {key: value for key, value in service.data.items()
                  if key != ATTR_ENTITY_ID}
        for device in hass.data[PLATFORM].values():
            # No entity_id in the call targets every known device.
            if device.entity_id == entity_id or entity_id is None:
                _LOGGER.debug("Calling %s (entity: %s) with params %s",
                              service, entity_id, params)
                await device.async_set_sound_setting(
                    params[PARAM_NAME], params[PARAM_VALUE])
    hass.services.async_register(
        DOMAIN, SET_SOUND_SETTING, async_service_handler,
        schema=SET_SOUND_SCHEMA)
class SongpalDevice(MediaPlayerDevice):
"""Class representing a Songpal device."""
def __init__(self, name, endpoint, poll=False):
"""Init."""
from songpal import Device
self._name = name
self._endpoint = endpoint
self._poll = poll
self.dev = Device(self._endpoint)
self._sysinfo = None
self._state = False
self._available = False
self._initialized = False
self._volume_control = None
self._volume_min = 0
self._volume_max = 1
self._volume = 0
self._is_muted = False
self._active_source = None
self._sources = {}
@property
def should_poll(self):
"""Return True if the device should be polled."""
return self._poll
async def initialize(self):
"""Initialize the device."""
await self.dev.get_supported_methods()
self._sysinfo = await self.dev.get_system_info()
async def async_activate_websocket(self):
"""Activate websocket for listening if wanted."""
_LOGGER.info("Activating websocket connection..")
from songpal import (VolumeChange, ContentChange,
PowerChange, ConnectChange)
async def _volume_changed(volume: VolumeChange):
_LOGGER.debug("Volume changed: %s", volume)
self._volume = volume.volume
self._is_muted = volume.mute
await self.async_update_ha_state()
async def _source_changed(content: ContentChange):
_LOGGER.debug("Source changed: %s", content)
if content.is_input:
self._active_source = self._sources[content.source]
_LOGGER.debug("New active source: %s", self._active_source)
await self.async_update_ha_state()
else:
_LOGGER.debug("Got non-handled content change: %s",
content)
async def _power_changed(power: PowerChange):
_LOGGER.debug("Power changed: %s", power)
self._state = power.status
await self.async_update_ha_state()
async def _try_reconnect(connect: ConnectChange):
_LOGGER.error("Got disconnected with %s, trying to reconnect.",
connect.exception)
self._available = False
self.dev.clear_notification_callbacks()
await self.async_update_ha_state()
# Try to reconnect forever, a successful reconnect will initialize
# the websocket connection again.
delay = 10
while not self._available:
_LOGGER.debug("Trying to reconnect in %s seconds", delay)
await asyncio.sleep(delay)
# We need to inform HA about the state in case we are coming
# back from a disconnected state.
await self.async_update_ha_state(force_refresh=True)
delay = min(2*delay, 300)
_LOGGER.info("Reconnected to %s", self.name)
self.dev.on_notification(VolumeChange, _volume_changed)
self.dev.on_notification(ContentChange, _source_changed)
self.dev.on_notification(PowerChange, _power_changed)
self.dev.on_notification(ConnectChange, _try_reconnect)
async def listen_events():
await self.dev.listen_notifications()
async def handle_stop(event):
await self.dev.stop_listen_notifications()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_stop)
self.hass.loop.create_task(listen_events())
@property
def name(self):
"""Return name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._sysinfo.macAddr
@property
def available(self):
"""Return availability of the device."""
return self._available
async def async_set_sound_setting(self, name, value):
"""Change a setting on the device."""
await self.dev.set_sound_settings(name, value)
async def async_update(self):
"""Fetch updates from the device."""
from songpal import SongpalException
try:
volumes = await self.dev.get_volume_information()
if not volumes:
_LOGGER.error("Got no volume controls, bailing out")
self._available = False
return
if len(volumes) > 1:
_LOGGER.debug(
"Got %s volume controls, using the first one", volumes)
volume = volumes[0]
_LOGGER.debug("Current volume: %s", volume)
self._volume_max = volume.maxVolume
self._volume_min = volume.minVolume
self._volume = volume.volume
self._volume_control = volume
self._is_muted = self._volume_control.is_muted
status = await self.dev.get_power()
self._state = status.status
_LOGGER.debug("Got state: %s", status)
inputs = await self.dev.get_inputs()
_LOGGER.debug("Got ins: %s", inputs)
self._sources = OrderedDict()
for input_ in inputs:
self._sources[input_.uri] = input_
if input_.active:
self._active_source = input_
_LOGGER.debug("Active source: %s", self._active_source)
self._available = True
# activate notifications if wanted
if not self._poll:
await self.hass.async_create_task(
self.async_activate_websocket())
except SongpalException as ex:
_LOGGER.error("Unable to update: %s", ex)
self._available = False
async def async_select_source(self, source):
"""Select source."""
for out in self._sources.values():
if out.title == source:
await out.activate()
return
_LOGGER.error("Unable to find output: %s", source)
@property
def source_list(self):
"""Return list of available sources."""
return [src.title for src in self._sources.values()]
@property
def state(self):
"""Return current state."""
if self._state:
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return currently active source."""
# Avoid a KeyError when _active_source is not (yet) populated
return getattr(self._active_source, 'title', None)
@property
def volume_level(self):
"""Return volume level."""
volume = self._volume / self._volume_max
return volume
async def async_set_volume_level(self, volume):
"""Set volume level."""
volume = int(volume * self._volume_max)
_LOGGER.debug("Setting volume to %s", volume)
return await self._volume_control.set_volume(volume)
async def async_volume_up(self):
"""Set volume up."""
return await self._volume_control.set_volume("+1")
async def async_volume_down(self):
"""Set volume down."""
return await self._volume_control.set_volume("-1")
async def async_turn_on(self):
"""Turn the device on."""
return await self.dev.set_power(True)
async def async_turn_off(self):
"""Turn the device off."""
return await self.dev.set_power(False)
async def async_mute_volume(self, mute):
"""Mute or unmute the device."""
_LOGGER.debug("Set mute: %s", mute)
return await self._volume_control.set_mute(mute)
@property
def is_volume_muted(self):
"""Return whether the device is muted."""
return self._is_muted
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_SONGPAL | homeassistant/components/songpal/media_player.py | import asyncio
import logging
from collections import OrderedDict
import voluptuous as vol
from homeassistant.components.media_player import (
MediaPlayerDevice, PLATFORM_SCHEMA)
from homeassistant.components.media_player.const import (
DOMAIN, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,
SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP)
from homeassistant.const import (
ATTR_ENTITY_ID, CONF_NAME, STATE_OFF, STATE_ON, EVENT_HOMEASSISTANT_STOP)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_ENDPOINT = 'endpoint'
PARAM_NAME = 'name'
PARAM_VALUE = 'value'
PLATFORM = 'songpal'
SET_SOUND_SETTING = 'songpal_set_sound_setting'
SUPPORT_SONGPAL = SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | \
SUPPORT_VOLUME_MUTE | SUPPORT_SELECT_SOURCE | \
SUPPORT_TURN_ON | SUPPORT_TURN_OFF
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_ENDPOINT): cv.string,
})
SET_SOUND_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(PARAM_NAME): cv.string,
vol.Required(PARAM_VALUE): cv.string,
})
async def async_setup_platform(
hass, config, async_add_entities, discovery_info=None):
"""Set up the Songpal platform."""
from songpal import SongpalException
if PLATFORM not in hass.data:
hass.data[PLATFORM] = {}
if discovery_info is not None:
name = discovery_info["name"]
endpoint = discovery_info["properties"]["endpoint"]
_LOGGER.debug("Got autodiscovered %s - endpoint: %s", name, endpoint)
device = SongpalDevice(name, endpoint)
else:
name = config.get(CONF_NAME)
endpoint = config.get(CONF_ENDPOINT)
device = SongpalDevice(name, endpoint, poll=False)
if endpoint in hass.data[PLATFORM]:
_LOGGER.debug("The endpoint exists already, skipping setup.")
return
try:
await device.initialize()
except SongpalException as ex:
_LOGGER.error("Unable to get methods from songpal: %s", ex)
raise PlatformNotReady
hass.data[PLATFORM][endpoint] = device
async_add_entities([device], True)
async def async_service_handler(service):
"""Service handler."""
entity_id = service.data.get("entity_id", None)
params = {key: value for key, value in service.data.items()
if key != ATTR_ENTITY_ID}
for device in hass.data[PLATFORM].values():
if device.entity_id == entity_id or entity_id is None:
_LOGGER.debug("Calling %s (entity: %s) with params %s",
service, entity_id, params)
await device.async_set_sound_setting(
params[PARAM_NAME], params[PARAM_VALUE])
hass.services.async_register(
DOMAIN, SET_SOUND_SETTING, async_service_handler,
schema=SET_SOUND_SCHEMA)
class SongpalDevice(MediaPlayerDevice):
"""Class representing a Songpal device."""
def __init__(self, name, endpoint, poll=False):
"""Init."""
from songpal import Device
self._name = name
self._endpoint = endpoint
self._poll = poll
self.dev = Device(self._endpoint)
self._sysinfo = None
self._state = False
self._available = False
self._initialized = False
self._volume_control = None
self._volume_min = 0
self._volume_max = 1
self._volume = 0
self._is_muted = False
self._active_source = None
self._sources = {}
@property
def should_poll(self):
"""Return True if the device should be polled."""
return self._poll
async def initialize(self):
"""Initialize the device."""
await self.dev.get_supported_methods()
self._sysinfo = await self.dev.get_system_info()
async def async_activate_websocket(self):
"""Activate websocket for listening if wanted."""
_LOGGER.info("Activating websocket connection..")
from songpal import (VolumeChange, ContentChange,
PowerChange, ConnectChange)
async def _volume_changed(volume: VolumeChange):
_LOGGER.debug("Volume changed: %s", volume)
self._volume = volume.volume
self._is_muted = volume.mute
await self.async_update_ha_state()
async def _source_changed(content: ContentChange):
_LOGGER.debug("Source changed: %s", content)
if content.is_input:
self._active_source = self._sources[content.source]
_LOGGER.debug("New active source: %s", self._active_source)
await self.async_update_ha_state()
else:
_LOGGER.debug("Got non-handled content change: %s",
content)
async def _power_changed(power: PowerChange):
_LOGGER.debug("Power changed: %s", power)
self._state = power.status
await self.async_update_ha_state()
async def _try_reconnect(connect: ConnectChange):
_LOGGER.error("Got disconnected with %s, trying to reconnect.",
connect.exception)
self._available = False
self.dev.clear_notification_callbacks()
await self.async_update_ha_state()
# Try to reconnect forever, a successful reconnect will initialize
# the websocket connection again.
delay = 10
while not self._available:
_LOGGER.debug("Trying to reconnect in %s seconds", delay)
await asyncio.sleep(delay)
# We need to inform HA about the state in case we are coming
# back from a disconnected state.
await self.async_update_ha_state(force_refresh=True)
delay = min(2*delay, 300)
_LOGGER.info("Reconnected to %s", self.name)
self.dev.on_notification(VolumeChange, _volume_changed)
self.dev.on_notification(ContentChange, _source_changed)
self.dev.on_notification(PowerChange, _power_changed)
self.dev.on_notification(ConnectChange, _try_reconnect)
async def listen_events():
await self.dev.listen_notifications()
async def handle_stop(event):
await self.dev.stop_listen_notifications()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, handle_stop)
self.hass.loop.create_task(listen_events())
@property
def name(self):
"""Return name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self._sysinfo.macAddr
@property
def available(self):
"""Return availability of the device."""
return self._available
async def async_set_sound_setting(self, name, value):
"""Change a setting on the device."""
await self.dev.set_sound_settings(name, value)
async def async_update(self):
"""Fetch updates from the device."""
from songpal import SongpalException
try:
volumes = await self.dev.get_volume_information()
if not volumes:
_LOGGER.error("Got no volume controls, bailing out")
self._available = False
return
if len(volumes) > 1:
_LOGGER.debug(
"Got %s volume controls, using the first one", volumes)
volume = volumes[0]
_LOGGER.debug("Current volume: %s", volume)
self._volume_max = volume.maxVolume
self._volume_min = volume.minVolume
self._volume = volume.volume
self._volume_control = volume
self._is_muted = self._volume_control.is_muted
status = await self.dev.get_power()
self._state = status.status
_LOGGER.debug("Got state: %s", status)
inputs = await self.dev.get_inputs()
_LOGGER.debug("Got ins: %s", inputs)
self._sources = OrderedDict()
for input_ in inputs:
self._sources[input_.uri] = input_
if input_.active:
self._active_source = input_
_LOGGER.debug("Active source: %s", self._active_source)
self._available = True
# activate notifications if wanted
if not self._poll:
await self.hass.async_create_task(
self.async_activate_websocket())
except SongpalException as ex:
_LOGGER.error("Unable to update: %s", ex)
self._available = False
async def async_select_source(self, source):
"""Select source."""
for out in self._sources.values():
if out.title == source:
await out.activate()
return
_LOGGER.error("Unable to find output: %s", source)
@property
def source_list(self):
"""Return list of available sources."""
return [src.title for src in self._sources.values()]
@property
def state(self):
"""Return current state."""
if self._state:
return STATE_ON
return STATE_OFF
@property
def source(self):
"""Return currently active source."""
# Avoid a KeyError when _active_source is not (yet) populated
return getattr(self._active_source, 'title', None)
@property
def volume_level(self):
"""Return volume level."""
volume = self._volume / self._volume_max
return volume
async def async_set_volume_level(self, volume):
"""Set volume level."""
volume = int(volume * self._volume_max)
_LOGGER.debug("Setting volume to %s", volume)
return await self._volume_control.set_volume(volume)
async def async_volume_up(self):
"""Set volume up."""
return await self._volume_control.set_volume("+1")
async def async_volume_down(self):
"""Set volume down."""
return await self._volume_control.set_volume("-1")
async def async_turn_on(self):
"""Turn the device on."""
return await self.dev.set_power(True)
async def async_turn_off(self):
"""Turn the device off."""
return await self.dev.set_power(False)
async def async_mute_volume(self, mute):
"""Mute or unmute the device."""
_LOGGER.debug("Set mute: %s", mute)
return await self._volume_control.set_mute(mute)
@property
def is_volume_muted(self):
"""Return whether the device is muted."""
return self._is_muted
@property
def supported_features(self):
"""Return supported features."""
return SUPPORT_SONGPAL | 0.522689 | 0.09739 |
import sys
import gym
import pybullet_envs
import argparse
import itertools
import collections
import copy
import time
import numpy as np
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
NOISE_STD = 0.005
POPULATION_SIZE = 2000
PARENTS_COUNT = 10
WORKERS_COUNT = 2
SEEDS_PER_WORKER = POPULATION_SIZE // WORKERS_COUNT
MAX_SEED = 2**32 - 1
class MultiNoiseLinear(nn.Linear):
def set_noise_dim(self, dim):
assert isinstance(dim, int)
assert dim > 0
self.register_buffer('noise', torch.FloatTensor(dim, self.out_features, self.in_features))
self.register_buffer('noise_bias', torch.FloatTensor(dim, self.out_features))
def sample_noise_row(self, row):
# sample noise for our params
w_noise = NOISE_STD * torch.tensor(np.random.normal(size=self.weight.data.size()).astype(np.float32))
b_noise = NOISE_STD * torch.tensor(np.random.normal(size=self.bias.data.size()).astype(np.float32))
self.noise[row].copy_(w_noise)
self.noise_bias[row].copy_(b_noise)
def zero_noise(self):
self.noise.zero_()
self.noise_bias.zero_()
def forward(self, x):
o = super(MultiNoiseLinear, self).forward(x)
o_n = torch.matmul(self.noise, x.data.unsqueeze(-1)).squeeze(-1)
o.data += o_n + self.noise_bias
return o
class Net(nn.Module):
def __init__(self, obs_size, act_size, hid_size=64):
super(Net, self).__init__()
self.nonlin = nn.Tanh()
self.l1 = MultiNoiseLinear(obs_size, hid_size)
self.l2 = MultiNoiseLinear(hid_size, hid_size)
self.l3 = MultiNoiseLinear(hid_size, act_size)
def forward(self, x):
l1 = self.nonlin(self.l1(x))
l2 = self.nonlin(self.l2(l1))
l3 = self.nonlin(self.l3(l2))
return l3
def set_noise_seeds(self, seeds):
batch_size = len(seeds)
self.l1.set_noise_dim(batch_size)
self.l2.set_noise_dim(batch_size)
self.l3.set_noise_dim(batch_size)
for idx, seed in enumerate(seeds):
np.random.seed(seed)
self.l1.sample_noise_row(idx)
self.l2.sample_noise_row(idx)
self.l3.sample_noise_row(idx)
def zero_noise(self, batch_size):
self.l1.set_noise_dim(batch_size)
self.l2.set_noise_dim(batch_size)
self.l3.set_noise_dim(batch_size)
self.l1.zero_noise()
self.l2.zero_noise()
self.l3.zero_noise()
def evaluate(env, net, device="cpu"):
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = torch.FloatTensor([obs]).to(device)
action_v = net(obs_v)
obs, r, done, _ = env.step(action_v.data.cpu().numpy()[0])
reward += r
steps += 1
if done:
break
return reward, steps
def evaluate_batch(envs, net, device="cpu"):
count = len(envs)
obs = [e.reset() for e in envs]
rewards = [0.0 for _ in range(count)]
steps = [0 for _ in range(count)]
done_set = set()
while len(done_set) < count:
obs_v = torch.FloatTensor(obs).to(device)
out_v = net(obs_v)
out = out_v.data.cpu().numpy()
for i in range(count):
if i in done_set:
continue
new_o, r, done, _ = envs[i].step(out[i])
obs[i] = new_o
rewards[i] += r
steps[i] += 1
if done:
done_set.add(i)
return rewards, steps
def mutate_net(net, seed, copy_net=True):
new_net = copy.deepcopy(net) if copy_net else net
np.random.seed(seed)
for p in new_net.parameters():
noise_t = torch.from_numpy(np.random.normal(size=p.data.size()).astype(np.float32))
p.data += NOISE_STD * noise_t
return new_net
def build_net(env, seeds):
torch.manual_seed(seeds[0])
net = Net(env.observation_space.shape[0], env.action_space.shape[0])
for seed in seeds[1:]:
net = mutate_net(net, seed, copy_net=False)
return net
OutputItem = collections.namedtuple('OutputItem', field_names=['seeds', 'reward', 'steps'])
def worker_func(input_queue, output_queue, device="cpu"):
env_pool = [gym.make("HalfCheetahBulletEnv-v0")]
# first generation -- just evaluate given single seeds
parents = input_queue.get()
for seed in parents:
net = build_net(env_pool[0], seed).to(device)
net.zero_noise(batch_size=1)
reward, steps = evaluate(env_pool[0], net, device)
output_queue.put((seed, reward, steps))
while True:
parents = input_queue.get()
if parents is None:
break
parents.sort()
for parent_seeds, children_iter in itertools.groupby(parents, key=lambda s: s[:-1]):
batch = list(children_iter)
children_seeds = [b[-1] for b in batch]
net = build_net(env_pool[0], parent_seeds).to(device)
net.set_noise_seeds(children_seeds)
batch_size = len(children_seeds)
while len(env_pool) < batch_size:
env_pool.append(gym.make("HalfCheetahBulletEnv-v0"))
rewards, steps = evaluate_batch(env_pool[:batch_size], net, device)
for seeds, reward, step in zip(batch, rewards, steps):
output_queue.put((seeds, reward, step))
if __name__ == "__main__":
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action='store_true')
args = parser.parse_args()
writer = SummaryWriter(comment="-cheetah-ga-batch")
device = "cuda" if args.cuda else "cpu"
input_queues = []
output_queue = mp.Queue(maxsize=WORKERS_COUNT)
workers = []
for _ in range(WORKERS_COUNT):
input_queue = mp.Queue(maxsize=1)
input_queues.append(input_queue)
w = mp.Process(target=worker_func, args=(input_queue, output_queue, device))
w.start()
seeds = [(np.random.randint(MAX_SEED),) for _ in range(SEEDS_PER_WORKER)]
input_queue.put(seeds)
gen_idx = 0
elite = None
while True:
t_start = time.time()
batch_steps = 0
population = []
while len(population) < SEEDS_PER_WORKER * WORKERS_COUNT:
seeds, reward, steps = output_queue.get()
population.append((seeds, reward))
batch_steps += steps
if elite is not None:
population.append(elite)
population.sort(key=lambda p: p[1], reverse=True)
rewards = [p[1] for p in population[:PARENTS_COUNT]]
reward_mean = np.mean(rewards)
reward_max = np.max(rewards)
reward_std = np.std(rewards)
writer.add_scalar("reward_mean", reward_mean, gen_idx)
writer.add_scalar("reward_std", reward_std, gen_idx)
writer.add_scalar("reward_max", reward_max, gen_idx)
writer.add_scalar("batch_steps", batch_steps, gen_idx)
writer.add_scalar("gen_seconds", time.time() - t_start, gen_idx)
speed = batch_steps / (time.time() - t_start)
writer.add_scalar("speed", speed, gen_idx)
print("%d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f, speed=%.2f f/s" % (
gen_idx, reward_mean, reward_max, reward_std, speed))
elite = population[0]
for worker_queue in input_queues:
seeds = []
for _ in range(SEEDS_PER_WORKER):
parent = np.random.randint(PARENTS_COUNT)
next_seed = np.random.randint(MAX_SEED)
seeds.append(tuple(list(population[parent][0]) + [next_seed]))
worker_queue.put(seeds)
gen_idx += 1
pass | Chapter20/05_cheetah_ga_batch.py | import sys
import gym
import pybullet_envs
import argparse
import itertools
import collections
import copy
import time
import numpy as np
import torch
import torch.nn as nn
import torch.multiprocessing as mp
from tensorboardX import SummaryWriter
NOISE_STD = 0.005
POPULATION_SIZE = 2000
PARENTS_COUNT = 10
WORKERS_COUNT = 2
SEEDS_PER_WORKER = POPULATION_SIZE // WORKERS_COUNT
MAX_SEED = 2**32 - 1
class MultiNoiseLinear(nn.Linear):
def set_noise_dim(self, dim):
assert isinstance(dim, int)
assert dim > 0
self.register_buffer('noise', torch.FloatTensor(dim, self.out_features, self.in_features))
self.register_buffer('noise_bias', torch.FloatTensor(dim, self.out_features))
def sample_noise_row(self, row):
# sample noise for our params
w_noise = NOISE_STD * torch.tensor(np.random.normal(size=self.weight.data.size()).astype(np.float32))
b_noise = NOISE_STD * torch.tensor(np.random.normal(size=self.bias.data.size()).astype(np.float32))
self.noise[row].copy_(w_noise)
self.noise_bias[row].copy_(b_noise)
def zero_noise(self):
self.noise.zero_()
self.noise_bias.zero_()
def forward(self, x):
o = super(MultiNoiseLinear, self).forward(x)
o_n = torch.matmul(self.noise, x.data.unsqueeze(-1)).squeeze(-1)
o.data += o_n + self.noise_bias
return o
class Net(nn.Module):
def __init__(self, obs_size, act_size, hid_size=64):
super(Net, self).__init__()
self.nonlin = nn.Tanh()
self.l1 = MultiNoiseLinear(obs_size, hid_size)
self.l2 = MultiNoiseLinear(hid_size, hid_size)
self.l3 = MultiNoiseLinear(hid_size, act_size)
def forward(self, x):
l1 = self.nonlin(self.l1(x))
l2 = self.nonlin(self.l2(l1))
l3 = self.nonlin(self.l3(l2))
return l3
def set_noise_seeds(self, seeds):
batch_size = len(seeds)
self.l1.set_noise_dim(batch_size)
self.l2.set_noise_dim(batch_size)
self.l3.set_noise_dim(batch_size)
for idx, seed in enumerate(seeds):
np.random.seed(seed)
self.l1.sample_noise_row(idx)
self.l2.sample_noise_row(idx)
self.l3.sample_noise_row(idx)
def zero_noise(self, batch_size):
self.l1.set_noise_dim(batch_size)
self.l2.set_noise_dim(batch_size)
self.l3.set_noise_dim(batch_size)
self.l1.zero_noise()
self.l2.zero_noise()
self.l3.zero_noise()
def evaluate(env, net, device="cpu"):
obs = env.reset()
reward = 0.0
steps = 0
while True:
obs_v = torch.FloatTensor([obs]).to(device)
action_v = net(obs_v)
obs, r, done, _ = env.step(action_v.data.cpu().numpy()[0])
reward += r
steps += 1
if done:
break
return reward, steps
def evaluate_batch(envs, net, device="cpu"):
count = len(envs)
obs = [e.reset() for e in envs]
rewards = [0.0 for _ in range(count)]
steps = [0 for _ in range(count)]
done_set = set()
while len(done_set) < count:
obs_v = torch.FloatTensor(obs).to(device)
out_v = net(obs_v)
out = out_v.data.cpu().numpy()
for i in range(count):
if i in done_set:
continue
new_o, r, done, _ = envs[i].step(out[i])
obs[i] = new_o
rewards[i] += r
steps[i] += 1
if done:
done_set.add(i)
return rewards, steps
def mutate_net(net, seed, copy_net=True):
new_net = copy.deepcopy(net) if copy_net else net
np.random.seed(seed)
for p in new_net.parameters():
noise_t = torch.from_numpy(np.random.normal(size=p.data.size()).astype(np.float32))
p.data += NOISE_STD * noise_t
return new_net
def build_net(env, seeds):
torch.manual_seed(seeds[0])
net = Net(env.observation_space.shape[0], env.action_space.shape[0])
for seed in seeds[1:]:
net = mutate_net(net, seed, copy_net=False)
return net
OutputItem = collections.namedtuple('OutputItem', field_names=['seeds', 'reward', 'steps'])
def worker_func(input_queue, output_queue, device="cpu"):
env_pool = [gym.make("HalfCheetahBulletEnv-v0")]
# first generation -- just evaluate given single seeds
parents = input_queue.get()
for seed in parents:
net = build_net(env_pool[0], seed).to(device)
net.zero_noise(batch_size=1)
reward, steps = evaluate(env_pool[0], net, device)
output_queue.put((seed, reward, steps))
while True:
parents = input_queue.get()
if parents is None:
break
parents.sort()
for parent_seeds, children_iter in itertools.groupby(parents, key=lambda s: s[:-1]):
batch = list(children_iter)
children_seeds = [b[-1] for b in batch]
net = build_net(env_pool[0], parent_seeds).to(device)
net.set_noise_seeds(children_seeds)
batch_size = len(children_seeds)
while len(env_pool) < batch_size:
env_pool.append(gym.make("HalfCheetahBulletEnv-v0"))
rewards, steps = evaluate_batch(env_pool[:batch_size], net, device)
for seeds, reward, step in zip(batch, rewards, steps):
output_queue.put((seeds, reward, step))
if __name__ == "__main__":
mp.set_start_method('spawn')
parser = argparse.ArgumentParser()
parser.add_argument("--cuda", default=False, action='store_true')
args = parser.parse_args()
writer = SummaryWriter(comment="-cheetah-ga-batch")
device = "cuda" if args.cuda else "cpu"
input_queues = []
output_queue = mp.Queue(maxsize=WORKERS_COUNT)
workers = []
for _ in range(WORKERS_COUNT):
input_queue = mp.Queue(maxsize=1)
input_queues.append(input_queue)
w = mp.Process(target=worker_func, args=(input_queue, output_queue, device))
w.start()
seeds = [(np.random.randint(MAX_SEED),) for _ in range(SEEDS_PER_WORKER)]
input_queue.put(seeds)
gen_idx = 0
elite = None
while True:
t_start = time.time()
batch_steps = 0
population = []
while len(population) < SEEDS_PER_WORKER * WORKERS_COUNT:
seeds, reward, steps = output_queue.get()
population.append((seeds, reward))
batch_steps += steps
if elite is not None:
population.append(elite)
population.sort(key=lambda p: p[1], reverse=True)
rewards = [p[1] for p in population[:PARENTS_COUNT]]
reward_mean = np.mean(rewards)
reward_max = np.max(rewards)
reward_std = np.std(rewards)
writer.add_scalar("reward_mean", reward_mean, gen_idx)
writer.add_scalar("reward_std", reward_std, gen_idx)
writer.add_scalar("reward_max", reward_max, gen_idx)
writer.add_scalar("batch_steps", batch_steps, gen_idx)
writer.add_scalar("gen_seconds", time.time() - t_start, gen_idx)
speed = batch_steps / (time.time() - t_start)
writer.add_scalar("speed", speed, gen_idx)
print("%d: reward_mean=%.2f, reward_max=%.2f, reward_std=%.2f, speed=%.2f f/s" % (
gen_idx, reward_mean, reward_max, reward_std, speed))
elite = population[0]
for worker_queue in input_queues:
seeds = []
for _ in range(SEEDS_PER_WORKER):
parent = np.random.randint(PARENTS_COUNT)
next_seed = np.random.randint(MAX_SEED)
seeds.append(tuple(list(population[parent][0]) + [next_seed]))
worker_queue.put(seeds)
gen_idx += 1
pass | 0.709724 | 0.419053 |
from .test_utils import BaseWrapperTestCase
class TestDirtyInput(BaseWrapperTestCase):
def test_function_call_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
foo('abcd', 1234, spam='ham',)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_def_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
def foo(abcd, defg, spam='ham',):
pass
""",
"""
def foo(
abcd,
defg,
spam='ham',
):
pass
""",
)
def test_class_def_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
class Foo(abcd, defg, spam='ham',):
pass
""",
"""
class Foo(
abcd,
defg,
spam='ham',
):
pass
""",
)
def test_function_call_with_trailing_comma_and_comment(self) -> None:
self.assertTransform(
1,
8,
"""
foo('abcd', 1234,
24 * 60 * 60, # 1 day
)
""",
"""
foo(
'abcd',
1234,
24 * 60 * 60, # 1 day
)
""",
)
def test_function_call_three_line_style(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234, spam='ham'
)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_call_three_line_style_with_trailing_comma(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234, spam='ham',
)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_with_comment(self) -> None:
# We accept the misplacement of the comment here, rather than causing
# the misplacement of comments withing already "correctly" wrapped
# nested blocks.
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234,
# comment
spam='ham',
)
""",
"""
foo(
'abcd',
1234,
# comment
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_opening_paren(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd',
spam='ham',
)
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_parens(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd',
spam='ham')
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_trailing_paren(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd',
spam='ham')
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_complex(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd', 1234,
spam='ham', bees='spam')
""",
"""
foo(
'abcd',
1234,
spam='ham',
bees='spam',
)
""",
) | tests/tests_dirty_input.py | from .test_utils import BaseWrapperTestCase
class TestDirtyInput(BaseWrapperTestCase):
def test_function_call_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
foo('abcd', 1234, spam='ham',)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_def_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
def foo(abcd, defg, spam='ham',):
pass
""",
"""
def foo(
abcd,
defg,
spam='ham',
):
pass
""",
)
def test_class_def_with_internal_trailing_comma(self) -> None:
self.assertTransform(
1,
8,
"""
class Foo(abcd, defg, spam='ham',):
pass
""",
"""
class Foo(
abcd,
defg,
spam='ham',
):
pass
""",
)
def test_function_call_with_trailing_comma_and_comment(self) -> None:
self.assertTransform(
1,
8,
"""
foo('abcd', 1234,
24 * 60 * 60, # 1 day
)
""",
"""
foo(
'abcd',
1234,
24 * 60 * 60, # 1 day
)
""",
)
def test_function_call_three_line_style(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234, spam='ham'
)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_call_three_line_style_with_trailing_comma(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234, spam='ham',
)
""",
"""
foo(
'abcd',
1234,
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_with_comment(self) -> None:
# We accept the misplacement of the comment here, rather than causing
# the misplacement of comments withing already "correctly" wrapped
# nested blocks.
self.assertTransform(
2,
8,
"""
foo(
'abcd', 1234,
# comment
spam='ham',
)
""",
"""
foo(
'abcd',
1234,
# comment
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_opening_paren(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd',
spam='ham',
)
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_parens(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd',
spam='ham')
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_hugging_trailing_paren(self) -> None:
self.assertTransform(
2,
8,
"""
foo(
'abcd',
spam='ham')
""",
"""
foo(
'abcd',
spam='ham',
)
""",
)
def test_function_call_partly_wrapped_complex(self) -> None:
self.assertTransform(
2,
8,
"""
foo('abcd', 1234,
spam='ham', bees='spam')
""",
"""
foo(
'abcd',
1234,
spam='ham',
bees='spam',
)
""",
) | 0.688154 | 0.47384 |
from __future__ import unicode_literals
import re
from boto3 import Session
from collections import defaultdict
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.exceptions import RESTError
from moto.cloudformation import cloudformation_backends
import datetime
import time
import uuid
import itertools
import json
import yaml
import hashlib
from .utils import parameter_arn
from .exceptions import (
ValidationException,
InvalidFilterValue,
InvalidFilterOption,
InvalidFilterKey,
ParameterVersionLabelLimitExceeded,
ParameterVersionNotFound,
ParameterNotFound,
DocumentAlreadyExists,
InvalidDocumentOperation,
AccessDeniedException,
InvalidDocument,
InvalidDocumentContent,
InvalidDocumentVersion,
DuplicateDocumentVersionName,
DuplicateDocumentContent,
)
class Parameter(BaseModel):
    """One version of an SSM parameter.

    SecureString values are stored with a fake ``kms:<keyid>:`` prefix to
    simulate KMS encryption; ``decrypt`` strips that prefix again.
    """

    def __init__(
        self,
        name,
        value,
        type,
        description,
        allowed_pattern,
        keyid,
        last_modified_date,
        version,
    ):
        self.name = name
        self.type = type
        self.description = description
        self.allowed_pattern = allowed_pattern
        self.keyid = keyid
        self.last_modified_date = last_modified_date
        self.version = version
        self.labels = []

        if self.type != "SecureString":
            self.value = value
        else:
            # SecureString parameters default to the AWS-managed SSM key.
            if not self.keyid:
                self.keyid = "alias/aws/ssm"
            self.value = self.encrypt(value)

    def encrypt(self, value):
        """Fake encryption: tag the value with the key id it was stored under."""
        return "kms:{}:".format(self.keyid) + value

    def decrypt(self, value):
        """Undo ``encrypt``; non-SecureString values pass through unchanged."""
        if self.type != "SecureString":
            return value

        prefix = "kms:{}:".format(self.keyid or "default")
        if value.startswith(prefix):
            return value[len(prefix) :]
        # Unexpected prefix: fall through, yielding None (original behaviour).

    def response_object(self, decrypt=False, region=None):
        """Build the GetParameter(s) response entry for this version."""
        response = {
            "Name": self.name,
            "Type": self.type,
            "Value": self.decrypt(self.value) if decrypt else self.value,
            "Version": self.version,
            "LastModifiedDate": round(self.last_modified_date, 3),
        }
        if region:
            response["ARN"] = parameter_arn(region, self.name)
        return response

    def describe_response_object(self, decrypt=False, include_labels=False):
        """Build the DescribeParameters response entry for this version."""
        response = self.response_object(decrypt)
        response["LastModifiedDate"] = round(self.last_modified_date, 3)
        response["LastModifiedUser"] = "N/A"

        # Optional attributes appear only when they carry a value.
        for attr, key in (
            ("description", "Description"),
            ("keyid", "KeyId"),
            ("allowed_pattern", "AllowedPattern"),
        ):
            attr_value = getattr(self, attr)
            if attr_value:
                response[key] = attr_value

        if include_labels:
            response["Labels"] = self.labels
        return response
# Default (and effective maximum) timeout for a send_command request, in seconds.
MAX_TIMEOUT_SECONDS = 3600
def generate_ssm_doc_param_list(parameters):
    """Convert a document's ``parameters`` mapping into the AWS response shape.

    Returns ``None`` when the document declares no parameters.  Structured
    parameter types get a JSON-encoded default value; everything else is
    stringified.
    """
    if not parameters:
        return None

    structured_types = ("StringList", "StringMap", "MapList")
    param_list = []
    for param_name, param_info in parameters.items():
        if param_info["type"] in structured_types:
            default_value = json.dumps(param_info["default"])
        else:
            default_value = str(param_info["default"])
        param_list.append(
            {
                "Name": param_name,
                "Type": param_info["type"],
                "Description": param_info["description"],
                "DefaultValue": default_value,
            }
        )
    return param_list
class Document(BaseModel):
    """An SSM document plus the metadata parsed from its content.

    The raw ``content`` is parsed according to ``document_format`` (JSON or
    YAML) and the schema-dependent fields (``mainSteps`` / ``runtimeConfig``)
    are extracted eagerly, so invalid content fails at creation time with
    InvalidDocumentContent.
    """

    def __init__(
        self,
        name,
        version_name,
        content,
        document_type,
        document_format,
        requires,
        attachments,
        target_type,
        tags,
        document_version="1",
    ):
        self.name = name
        self.version_name = version_name
        self.content = content
        self.document_type = document_type
        self.document_format = document_format
        self.requires = requires
        self.attachments = attachments
        self.target_type = target_type
        self.tags = tags

        self.status = "Active"
        self.document_version = document_version
        self.owner = ACCOUNT_ID
        self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")

        if document_format == "JSON":
            try:
                content_json = json.loads(content)
            except ValueError:
                # json.JSONDecodeError is a subclass of ValueError, so this
                # single handler covers both Python 2 and Python 3 (the
                # former separate JSONDecodeError clause was unreachable).
                raise InvalidDocumentContent(
                    "The content for the document is not valid."
                )
        elif document_format == "YAML":
            try:
                content_json = yaml.safe_load(content)
            except yaml.YAMLError:
                raise InvalidDocumentContent(
                    "The content for the document is not valid."
                )
        else:
            raise ValidationException("Invalid document format " + str(document_format))

        self.content_json = content_json

        try:
            self.schema_version = str(content_json["schemaVersion"])
            self.description = content_json.get("description")
            self.outputs = content_json.get("outputs")
            self.files = content_json.get("files")
            # TODO add platformType (requires mapping the ssm actions to OS's this isn't well documented)
            self.platform_types = ["Not Implemented (moto)"]
            self.parameter_list = generate_ssm_doc_param_list(
                content_json.get("parameters")
            )

            # The executable section is schema-dependent: newer schemas use
            # mainSteps, 1.2 documents use runtimeConfig.
            if (
                self.schema_version == "0.3"
                or self.schema_version == "2.0"
                or self.schema_version == "2.2"
            ):
                self.mainSteps = content_json["mainSteps"]
            elif self.schema_version == "1.2":
                self.runtimeConfig = content_json.get("runtimeConfig")

        except KeyError:
            raise InvalidDocumentContent("The content for the document is not valid.")
class Command(BaseModel):
    """A send_command request and its (always-successful) per-instance results.

    moto does not execute anything on instances: every invocation is recorded
    immediately with status "Success" and placeholder output.
    """

    def __init__(
        self,
        comment="",
        document_name="",
        timeout_seconds=MAX_TIMEOUT_SECONDS,
        instance_ids=None,
        max_concurrency="",
        max_errors="",
        notification_config=None,
        output_s3_bucket_name="",
        output_s3_key_prefix="",
        output_s3_region="",
        parameters=None,
        service_role_arn="",
        targets=None,
        backend_region="us-east-1",
    ):
        # Replace mutable default arguments with fresh containers.
        if instance_ids is None:
            instance_ids = []

        if notification_config is None:
            notification_config = {}

        if parameters is None:
            parameters = {}

        if targets is None:
            targets = []

        self.error_count = 0
        # Everything "completes" instantly, so completed == targeted.
        self.completed_count = len(instance_ids)
        self.target_count = len(instance_ids)
        self.command_id = str(uuid.uuid4())
        self.status = "Success"
        self.status_details = "Details placeholder"

        self.requested_date_time = datetime.datetime.now()
        self.requested_date_time_iso = self.requested_date_time.isoformat()
        # The command "expires" timeout_seconds after it was requested.
        expires_after = self.requested_date_time + datetime.timedelta(
            0, timeout_seconds
        )
        self.expires_after = expires_after.isoformat()

        self.comment = comment
        self.document_name = document_name
        self.instance_ids = instance_ids
        self.max_concurrency = max_concurrency
        self.max_errors = max_errors
        self.notification_config = notification_config
        self.output_s3_bucket_name = output_s3_bucket_name
        self.output_s3_key_prefix = output_s3_key_prefix
        self.output_s3_region = output_s3_region
        self.parameters = parameters
        self.service_role_arn = service_role_arn
        self.targets = targets
        self.backend_region = backend_region

        # Get instance ids from a cloud formation stack target.
        stack_instance_ids = [
            self.get_instance_ids_by_stack_ids(target["Values"])
            for target in self.targets
            if target["Key"] == "tag:aws:cloudformation:stack-name"
        ]

        self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids))

        # Create invocations with a single run command plugin.
        self.invocations = []
        for instance_id in self.instance_ids:
            self.invocations.append(
                self.invocation_response(instance_id, "aws:runShellScript")
            )

    def get_instance_ids_by_stack_ids(self, stack_ids):
        """Resolve the EC2 instance ids belonging to the given CFN stacks."""
        instance_ids = []
        cloudformation_backend = cloudformation_backends[self.backend_region]
        for stack_id in stack_ids:
            stack_resources = cloudformation_backend.list_stack_resources(stack_id)
            instance_resources = [
                instance.id
                for instance in stack_resources
                if instance.type == "AWS::EC2::Instance"
            ]
            instance_ids.extend(instance_resources)

        return instance_ids

    def response_object(self):
        """Build the Command entry used by SendCommand / ListCommands."""
        r = {
            "CommandId": self.command_id,
            "Comment": self.comment,
            "CompletedCount": self.completed_count,
            "DocumentName": self.document_name,
            "ErrorCount": self.error_count,
            "ExpiresAfter": self.expires_after,
            "InstanceIds": self.instance_ids,
            "MaxConcurrency": self.max_concurrency,
            "MaxErrors": self.max_errors,
            "NotificationConfig": self.notification_config,
            "OutputS3Region": self.output_s3_region,
            "OutputS3BucketName": self.output_s3_bucket_name,
            "OutputS3KeyPrefix": self.output_s3_key_prefix,
            "Parameters": self.parameters,
            "RequestedDateTime": self.requested_date_time_iso,
            "ServiceRole": self.service_role_arn,
            "Status": self.status,
            "StatusDetails": self.status_details,
            "TargetCount": self.target_count,
            "Targets": self.targets,
        }

        return r

    def invocation_response(self, instance_id, plugin_name):
        """Build a fake, successful invocation record for one instance."""
        # Calculate elapsed time from requested time and now. Use a hardcoded
        # elapsed time since there is no easy way to convert a timedelta to
        # an ISO 8601 duration string.
        elapsed_time_iso = "PT5M"
        elapsed_time_delta = datetime.timedelta(minutes=5)
        end_time = self.requested_date_time + elapsed_time_delta

        r = {
            "CommandId": self.command_id,
            "InstanceId": instance_id,
            "Comment": self.comment,
            "DocumentName": self.document_name,
            "PluginName": plugin_name,
            "ResponseCode": 0,
            "ExecutionStartDateTime": self.requested_date_time_iso,
            "ExecutionElapsedTime": elapsed_time_iso,
            "ExecutionEndDateTime": end_time.isoformat(),
            "Status": "Success",
            "StatusDetails": "Success",
            "StandardOutputContent": "",
            "StandardOutputUrl": "",
            "StandardErrorContent": "",
        }

        return r

    def get_invocation(self, instance_id, plugin_name):
        """Return the invocation for *instance_id*, optionally matching plugin.

        Raises RESTError(InvocationDoesNotExist) when the instance was not
        targeted or the plugin name does not match.
        """
        invocation = next(
            (
                invocation
                for invocation in self.invocations
                if invocation["InstanceId"] == instance_id
            ),
            None,
        )

        if invocation is None:
            raise RESTError(
                "InvocationDoesNotExist",
                "An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation",
            )

        if plugin_name is not None and invocation["PluginName"] != plugin_name:
            raise RESTError(
                "InvocationDoesNotExist",
                "An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation",
            )

        return invocation
def _validate_document_format(document_format):
aws_doc_formats = ["JSON", "YAML"]
if document_format not in aws_doc_formats:
raise ValidationException("Invalid document format " + str(document_format))
def _validate_document_info(content, name, document_type, document_format, strict=True):
aws_ssm_name_regex = r"^[a-zA-Z0-9_\-.]{3,128}$"
aws_name_reject_list = ["aws-", "amazon", "amzn"]
aws_doc_types = [
"Command",
"Policy",
"Automation",
"Session",
"Package",
"ApplicationConfiguration",
"ApplicationConfigurationSchema",
"DeploymentStrategy",
"ChangeCalendar",
]
_validate_document_format(document_format)
if not content:
raise ValidationException("Content is required")
if list(filter(name.startswith, aws_name_reject_list)):
raise ValidationException("Invalid document name " + str(name))
ssm_name_pattern = re.compile(aws_ssm_name_regex)
if not ssm_name_pattern.match(name):
raise ValidationException("Invalid document name " + str(name))
if strict and document_type not in aws_doc_types:
# Update document doesn't use document type
raise ValidationException("Invalid document type " + str(document_type))
def _document_filter_equal_comparator(keyed_value, filter):
for v in filter["Values"]:
if keyed_value == v:
return True
return False
def _document_filter_list_includes_comparator(keyed_value_list, filter):
for v in filter["Values"]:
if v in keyed_value_list:
return True
return False
def _document_filter_match(filters, ssm_doc):
    """Return True when *ssm_doc* satisfies every filter in *filters*.

    The "Owner" filter value "Self" is rewritten in place to the running
    account id before the comparison, mirroring the original behaviour.
    """
    for doc_filter in filters:
        key = doc_filter["Key"]
        values = doc_filter["Values"]

        if key == "Name":
            if all(ssm_doc.name != value for value in values):
                return False

        elif key == "Owner":
            if len(values) != 1:
                raise ValidationException("Owner filter can only have one value.")
            if values[0] == "Self":
                # Update to running account ID
                values[0] = ACCOUNT_ID
            if all(ssm_doc.owner != value for value in values):
                return False

        elif key == "PlatformTypes":
            if not any(value in ssm_doc.platform_types for value in values):
                return False

        elif key == "DocumentType":
            if all(ssm_doc.document_type != value for value in values):
                return False

        elif key == "TargetType":
            if all(ssm_doc.target_type != value for value in values):
                return False

    return True
class SimpleSystemManagerBackend(BaseBackend):
    def __init__(self, region_name=None):
        """In-memory SSM backend serving a single region."""
        super(SimpleSystemManagerBackend, self).__init__()
        # each value is a list of all of the versions for a parameter
        # to get the current value, grab the last item of the list
        self._parameters = defaultdict(list)

        # Nested mapping used for resource tagging.
        self._resource_tags = defaultdict(lambda: defaultdict(dict))
        # Commands issued via send_command.
        self._commands = []
        # Validation errors accumulated by _validate_parameter_filters.
        self._errors = []
        # document name -> {"documents": {version: Document},
        #                   "default_version": str, "latest_version": str}
        self._documents = defaultdict(dict)

        self._region = region_name
    def reset(self):
        """Wipe all state, preserving only the region this backend serves."""
        region_name = self._region
        # Re-run __init__ on a clean instance dict instead of clearing each
        # attribute individually.
        self.__dict__ = {}
        self.__init__(region_name)
def _generate_document_description(self, document):
latest = self._documents[document.name]["latest_version"]
default_version = self._documents[document.name]["default_version"]
base = {
"Hash": hashlib.sha256(document.content.encode("utf-8")).hexdigest(),
"HashType": "Sha256",
"Name": document.name,
"Owner": document.owner,
"CreatedDate": document.created_date,
"Status": document.status,
"DocumentVersion": document.document_version,
"Description": document.description,
"Parameters": document.parameter_list,
"PlatformTypes": document.platform_types,
"DocumentType": document.document_type,
"SchemaVersion": document.schema_version,
"LatestVersion": latest,
"DefaultVersion": default_version,
"DocumentFormat": document.document_format,
}
if document.version_name:
base["VersionName"] = document.version_name
if document.target_type:
base["TargetType"] = document.target_type
if document.tags:
base["Tags"] = document.tags
return base
def _generate_document_information(self, ssm_document, document_format):
base = {
"Name": ssm_document.name,
"DocumentVersion": ssm_document.document_version,
"Status": ssm_document.status,
"Content": ssm_document.content,
"DocumentType": ssm_document.document_type,
"DocumentFormat": document_format,
}
if document_format == "JSON":
base["Content"] = json.dumps(ssm_document.content_json)
elif document_format == "YAML":
base["Content"] = yaml.dump(ssm_document.content_json)
else:
raise ValidationException("Invalid document format " + str(document_format))
if ssm_document.version_name:
base["VersionName"] = ssm_document.version_name
if ssm_document.requires:
base["Requires"] = ssm_document.requires
if ssm_document.attachments:
base["AttachmentsContent"] = ssm_document.attachments
return base
def _generate_document_list_information(self, ssm_document):
base = {
"Name": ssm_document.name,
"Owner": ssm_document.owner,
"DocumentVersion": ssm_document.document_version,
"DocumentType": ssm_document.document_type,
"SchemaVersion": ssm_document.schema_version,
"DocumentFormat": ssm_document.document_format,
}
if ssm_document.version_name:
base["VersionName"] = ssm_document.version_name
if ssm_document.platform_types:
base["PlatformTypes"] = ssm_document.platform_types
if ssm_document.target_type:
base["TargetType"] = ssm_document.target_type
if ssm_document.tags:
base["Tags"] = ssm_document.tags
if ssm_document.requires:
base["Requires"] = ssm_document.requires
return base
    def create_document(
        self,
        content,
        requires,
        attachments,
        name,
        version_name,
        document_type,
        document_format,
        target_type,
        tags,
    ):
        """Create version 1 of a new SSM document and return its description.

        Raises DocumentAlreadyExists when a document with this name exists;
        content/name/type validation errors are raised by the Document
        constructor and _validate_document_info.
        """
        # NOTE: the Document constructor parses the content first, so content
        # errors surface before the name/type validation below.
        ssm_document = Document(
            name=name,
            version_name=version_name,
            content=content,
            document_type=document_type,
            document_format=document_format,
            requires=requires,
            attachments=attachments,
            target_type=target_type,
            tags=tags,
        )

        _validate_document_info(
            content=content,
            name=name,
            document_type=document_type,
            document_format=document_format,
        )

        if self._documents.get(ssm_document.name):
            raise DocumentAlreadyExists("The specified document already exists.")

        # A brand-new document has a single version that is both the default
        # and the latest.
        self._documents[ssm_document.name] = {
            "documents": {ssm_document.document_version: ssm_document},
            "default_version": ssm_document.document_version,
            "latest_version": ssm_document.document_version,
        }

        return self._generate_document_description(ssm_document)
    def delete_document(self, name, document_version, version_name, force):
        """Delete one version — or, with no selectors, all versions — of *name*.

        The default version can only be removed by deleting the whole
        document; shared ApplicationConfigurationSchema documents require
        ``force``.  Raises InvalidDocument / InvalidDocumentOperation.
        """
        documents = self._documents.get(name, {}).get("documents", {})
        keys_to_delete = set()

        if documents:
            default_version = self._documents[name]["default_version"]

            if (
                documents[default_version].document_type
                == "ApplicationConfigurationSchema"
                and not force
            ):
                raise InvalidDocumentOperation(
                    "You attempted to delete a document while it is still shared. "
                    "You must stop sharing the document before you can delete it."
                )

            if document_version and document_version == default_version:
                raise InvalidDocumentOperation(
                    "Default version of the document can't be deleted."
                )

            if document_version or version_name:
                # We delete only a specific version
                delete_doc = self._find_document(name, document_version, version_name)

                # we can't delete only the default version
                if (
                    delete_doc
                    and delete_doc.document_version == default_version
                    and len(documents) != 1
                ):
                    raise InvalidDocumentOperation(
                        "Default version of the document can't be deleted."
                    )

                if delete_doc:
                    keys_to_delete.add(delete_doc.document_version)
                else:
                    raise InvalidDocument("The specified document does not exist.")
            else:
                # We are deleting all versions
                keys_to_delete = set(documents.keys())

            for key in keys_to_delete:
                del self._documents[name]["documents"][key]

            if len(self._documents[name]["documents"].keys()) == 0:
                # Last version removed: drop the whole document entry.
                del self._documents[name]
            else:
                # If the latest version was among the deleted ones, recompute
                # it as the highest remaining numeric version.
                old_latest = self._documents[name]["latest_version"]
                if old_latest not in self._documents[name]["documents"].keys():
                    leftover_keys = self._documents[name]["documents"].keys()
                    int_keys = []
                    for key in leftover_keys:
                        int_keys.append(int(key))
                    self._documents[name]["latest_version"] = str(sorted(int_keys)[-1])
        else:
            raise InvalidDocument("The specified document does not exist.")
    def _find_document(
        self, name, document_version=None, version_name=None, strict=True
    ):
        """Locate one version of document *name*.

        With neither selector the default version is returned; with both, a
        version must match version number AND version name.  When ``strict``
        is true a miss raises InvalidDocument, otherwise None is returned.
        """
        if not self._documents.get(name):
            raise InvalidDocument("The specified document does not exist.")

        documents = self._documents[name]["documents"]
        ssm_document = None

        if not version_name and not document_version:
            # Retrieve default version
            default_version = self._documents[name]["default_version"]
            ssm_document = documents.get(default_version)

        elif version_name and document_version:
            for doc_version, document in documents.items():
                if (
                    doc_version == document_version
                    and document.version_name == version_name
                ):
                    ssm_document = document
                    break

        else:
            # Exactly one selector was supplied; take the first version that
            # matches it (both conditions are checked per iteration, but only
            # one of document_version / version_name is truthy here).
            for doc_version, document in documents.items():
                if document_version and doc_version == document_version:
                    ssm_document = document
                    break
                if version_name and document.version_name == version_name:
                    ssm_document = document
                    break

        if strict and not ssm_document:
            raise InvalidDocument("The specified document does not exist.")

        return ssm_document
def get_document(self, name, document_version, version_name, document_format):
ssm_document = self._find_document(name, document_version, version_name)
if not document_format:
document_format = ssm_document.document_format
else:
_validate_document_format(document_format=document_format)
return self._generate_document_information(ssm_document, document_format)
def update_document_default_version(self, name, document_version):
ssm_document = self._find_document(name, document_version=document_version)
self._documents[name]["default_version"] = document_version
base = {
"Name": ssm_document.name,
"DefaultVersion": document_version,
}
if ssm_document.version_name:
base["DefaultVersionName"] = ssm_document.version_name
return base
    def update_document(
        self,
        content,
        attachments,
        name,
        version_name,
        document_version,
        document_format,
        target_type,
    ):
        """Add a new version to an existing document and return its description.

        The request must target the latest version (or "$LATEST"), must not
        reuse an existing version name, and must carry content that differs
        from every stored version.
        """
        _validate_document_info(
            content=content,
            name=name,
            document_type=None,
            document_format=document_format,
            strict=False,
        )

        if not self._documents.get(name):
            raise InvalidDocument("The specified document does not exist.")
        if (
            self._documents[name]["latest_version"] != document_version
            and document_version != "$LATEST"
        ):
            raise InvalidDocumentVersion(
                "The document version is not valid or does not exist."
            )
        if version_name and self._find_document(
            name, version_name=version_name, strict=False
        ):
            raise DuplicateDocumentVersionName(
                "The specified version name is a duplicate."
            )

        old_ssm_document = self._find_document(name)

        # The new version inherits type/requires/tags from the current
        # default version and is assigned the next sequential version number.
        new_ssm_document = Document(
            name=name,
            version_name=version_name,
            content=content,
            document_type=old_ssm_document.document_type,
            document_format=document_format,
            requires=old_ssm_document.requires,
            attachments=attachments,
            target_type=target_type,
            tags=old_ssm_document.tags,
            document_version=str(int(self._documents[name]["latest_version"]) + 1),
        )

        for doc_version, document in self._documents[name]["documents"].items():
            if document.content == new_ssm_document.content:
                raise DuplicateDocumentContent(
                    "The content of the association document matches another document. "
                    "Change the content of the document and try again."
                )

        self._documents[name]["latest_version"] = str(
            int(self._documents[name]["latest_version"]) + 1
        )
        self._documents[name]["documents"][
            new_ssm_document.document_version
        ] = new_ssm_document

        return self._generate_document_description(new_ssm_document)
def describe_document(self, name, document_version, version_name):
ssm_document = self._find_document(name, document_version, version_name)
return self._generate_document_description(ssm_document)
    def list_documents(
        self, document_filter_list, filters, max_results=10, next_token="0"
    ):
        """Return one page of document summaries plus the next page token.

        ``next_token`` is the count of (sorted) documents already returned;
        an empty token means the listing is complete.
        """
        if document_filter_list:
            raise ValidationException(
                "DocumentFilterList is deprecated. Instead use Filters."
            )

        next_token = int(next_token)
        results = []
        dummy_token_tracker = 0
        # Sort to maintain next token adjacency
        for document_name, document_bundle in sorted(self._documents.items()):
            if len(results) == max_results:
                # There's still more to go so we need a next token
                return results, str(next_token + len(results))

            if dummy_token_tracker < next_token:
                # Skip documents already returned by a previous page.
                dummy_token_tracker = dummy_token_tracker + 1
                continue

            default_version = document_bundle["default_version"]
            ssm_doc = self._documents[document_name]["documents"][default_version]
            if filters and not _document_filter_match(filters, ssm_doc):
                # If we have filters enabled, and we don't match them,
                continue
            else:
                results.append(self._generate_document_list_information(ssm_doc))

        # If we've fallen out of the loop, theres no more documents. No next token.
        return results, ""
def delete_parameter(self, name):
return self._parameters.pop(name, None)
def delete_parameters(self, names):
result = []
for name in names:
try:
del self._parameters[name]
result.append(name)
except KeyError:
pass
return result
    def describe_parameters(self, filters, parameter_filters):
        """Describe parameters matching legacy Filters or ParameterFilters.

        The two filter styles are mutually exclusive.  Legacy filters support
        Name (prefix match), Type and KeyId (exact match).
        """
        if filters and parameter_filters:
            raise ValidationException(
                "You can use either Filters or ParameterFilters in a single request."
            )

        self._validate_parameter_filters(parameter_filters, by_path=False)

        result = []
        for param_name in self._parameters:
            ssm_parameter = self.get_parameter(param_name, False)
            if not self._match_filters(ssm_parameter, parameter_filters):
                continue

            if filters:
                for filter in filters:
                    if filter["Key"] == "Name":
                        k = ssm_parameter.name
                        for v in filter["Values"]:
                            if k.startswith(v):
                                result.append(ssm_parameter)
                                break
                    elif filter["Key"] == "Type":
                        k = ssm_parameter.type
                        for v in filter["Values"]:
                            if k == v:
                                result.append(ssm_parameter)
                                break
                    elif filter["Key"] == "KeyId":
                        k = ssm_parameter.keyid
                        if k:
                            for v in filter["Values"]:
                                if k == v:
                                    result.append(ssm_parameter)
                                    break
                # NOTE(review): a parameter matching several legacy filters is
                # appended once per matching filter — confirm whether
                # duplicate entries are intended here.
                continue

            result.append(ssm_parameter)
        return result
def _validate_parameter_filters(self, parameter_filters, by_path):
for index, filter_obj in enumerate(parameter_filters or []):
key = filter_obj["Key"]
values = filter_obj.get("Values", [])
if key == "Path":
option = filter_obj.get("Option", "OneLevel")
else:
option = filter_obj.get("Option", "Equals")
if not re.match(r"^tag:.+|Name|Type|KeyId|Path|Label|Tier$", key):
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.key".format(
index=(index + 1)
),
value=key,
constraint="Member must satisfy regular expression pattern: tag:.+|Name|Type|KeyId|Path|Label|Tier",
)
)
if len(key) > 132:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.key".format(
index=(index + 1)
),
value=key,
constraint="Member must have length less than or equal to 132",
)
)
if len(option) > 10:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.option".format(
index=(index + 1)
),
value="over 10 chars",
constraint="Member must have length less than or equal to 10",
)
)
if len(values) > 50:
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.values".format(
index=(index + 1)
),
value=values,
constraint="Member must have length less than or equal to 50",
)
)
if any(len(value) > 1024 for value in values):
self._errors.append(
self._format_error(
key="parameterFilters.{index}.member.values".format(
index=(index + 1)
),
value=values,
constraint="[Member must have length less than or equal to 1024, Member must have length greater than or equal to 1]",
)
)
self._raise_errors()
filter_keys = []
for filter_obj in parameter_filters or []:
key = filter_obj["Key"]
values = filter_obj.get("Values")
if key == "Path":
option = filter_obj.get("Option", "OneLevel")
else:
option = filter_obj.get("Option", "Equals")
if not by_path and key == "Label":
raise InvalidFilterKey(
"The following filter key is not valid: Label. Valid filter keys include: [Path, Name, Type, KeyId, Tier]."
)
if by_path and key in ["Name", "Path", "Tier"]:
raise InvalidFilterKey(
"The following filter key is not valid: {key}. Valid filter keys include: [Type, KeyId].".format(
key=key
)
)
if not values:
raise InvalidFilterValue(
"The following filter values are missing : null for filter key Name."
)
if key in filter_keys:
raise InvalidFilterKey(
"The following filter is duplicated in the request: Name. A request can contain only one occurrence of a specific filter."
)
if key == "Path":
if option not in ["Recursive", "OneLevel"]:
raise InvalidFilterOption(
"The following filter option is not valid: {option}. Valid options include: [Recursive, OneLevel].".format(
option=option
)
)
if any(value.lower().startswith(("/aws", "/ssm")) for value in values):
raise ValidationException(
'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"When using global parameters, please specify within a global namespace."
)
for value in values:
if value.lower().startswith(("/aws", "/ssm")):
raise ValidationException(
'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"When using global parameters, please specify within a global namespace."
)
if (
"//" in value
or not value.startswith("/")
or not re.match("^[a-zA-Z0-9_.-/]*$", value)
):
raise ValidationException(
'The parameter doesn\'t meet the parameter name requirements. The parameter name must begin with a forward slash "/". '
'It can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
"It must use only letters, numbers, or the following symbols: . (period), - (hyphen), _ (underscore). "
'Special characters are not allowed. All sub-paths, if specified, must use the forward slash symbol "/". '
"Valid example: /get/parameters2-/by1./path0_."
)
if key == "Tier":
for value in values:
if value not in ["Standard", "Advanced", "Intelligent-Tiering"]:
raise InvalidFilterOption(
"The following filter value is not valid: {value}. Valid values include: [Standard, Advanced, Intelligent-Tiering].".format(
value=value
)
)
if key == "Type":
for value in values:
if value not in ["String", "StringList", "SecureString"]:
raise InvalidFilterOption(
"The following filter value is not valid: {value}. Valid values include: [String, StringList, SecureString].".format(
value=value
)
)
allowed_options = ["Equals", "BeginsWith"]
if key == "Name":
allowed_options += ["Contains"]
if key != "Path" and option not in allowed_options:
raise InvalidFilterOption(
"The following filter option is not valid: {option}. Valid options include: [BeginsWith, Equals].".format(
option=option
)
)
filter_keys.append(key)
def _format_error(self, key, value, constraint):
return 'Value "{value}" at "{key}" failed to satisfy constraint: {constraint}'.format(
constraint=constraint, key=key, value=value
)
def _raise_errors(self):
if self._errors:
count = len(self._errors)
plural = "s" if len(self._errors) > 1 else ""
errors = "; ".join(self._errors)
self._errors = [] # reset collected errors
raise ValidationException(
"{count} validation error{plural} detected: {errors}".format(
count=count, plural=plural, errors=errors
)
)
def get_all_parameters(self):
result = []
for k, _ in self._parameters.items():
result.append(self._parameters[k])
return result
def get_parameters(self, names, with_decryption):
result = []
if len(names) > 10:
raise ValidationException(
"1 validation error detected: "
"Value '[{}]' at 'names' failed to satisfy constraint: "
"Member must have length less than or equal to 10.".format(
", ".join(names)
)
)
for name in names:
if name in self._parameters:
result.append(self.get_parameter(name, with_decryption))
return result
    def get_parameters_by_path(
        self,
        path,
        with_decryption,
        recursive,
        filters=None,
        next_token=None,
        max_results=10,
    ):
        """Implement the get-parameters-by-path-API in the backend."""
        self._validate_parameter_filters(filters, by_path=True)

        result = []
        # path could be with or without a trailing /. we handle this
        # difference here.
        path = path.rstrip("/") + "/"
        for param_name in self._parameters:
            if path != "/" and not param_name.startswith(path):
                continue
            # A further '/' below the prefix means the parameter lives on a
            # deeper level; skip it unless recursion was requested.  (The +1
            # in the slice assumes the character right after the prefix is
            # never itself a '/' — TODO confirm for edge-case names.)
            if "/" in param_name[len(path) + 1 :] and not recursive:
                continue
            if not self._match_filters(
                self.get_parameter(param_name, with_decryption), filters
            ):
                continue
            result.append(self.get_parameter(param_name, with_decryption))

        return self._get_values_nexttoken(result, max_results, next_token)
def _get_values_nexttoken(self, values_list, max_results, next_token=None):
if next_token is None:
next_token = 0
next_token = int(next_token)
max_results = int(max_results)
values = values_list[next_token : next_token + max_results]
if len(values) == max_results:
next_token = str(next_token + max_results)
else:
next_token = None
return values, next_token
def get_parameter_history(self, name, with_decryption):
if name in self._parameters:
return self._parameters[name]
return None
def _match_filters(self, parameter, filters=None):
    """Return True if the given parameter matches all the filters"""
    for filter_obj in filters or []:
        key = filter_obj["Key"]
        values = filter_obj.get("Values", [])
        # AWS defaults the comparison Option per filter key.
        if key == "Path":
            option = filter_obj.get("Option", "OneLevel")
        else:
            option = filter_obj.get("Option", "Equals")
        # Pick the attribute the filter inspects; names and paths are
        # normalised to a leading-slash form before comparison.
        what = None
        if key == "KeyId":
            what = parameter.keyid
        elif key == "Name":
            what = "/" + parameter.name.lstrip("/")
            if option != "Contains":
                values = ["/" + value.lstrip("/") for value in values]
        elif key == "Path":
            what = "/" + parameter.name.lstrip("/")
            values = ["/" + value.strip("/") for value in values]
        elif key == "Type":
            what = parameter.type
        if what is None:
            # e.g. a KeyId filter against a parameter without a key id.
            return False
        elif option == "BeginsWith" and not any(
            what.startswith(value) for value in values
        ):
            return False
        elif option == "Contains" and not any(value in what for value in values):
            return False
        elif option == "Equals" and not any(what == value for value in values):
            return False
        elif option == "OneLevel":
            # Only direct children of one of the given paths match.
            if any(value == "/" and len(what.split("/")) == 2 for value in values):
                continue
            elif any(
                value != "/"
                and what.startswith(value + "/")
                and len(what.split("/")) - 1 == len(value.split("/"))
                for value in values
            ):
                continue
            else:
                return False
        elif option == "Recursive":
            # Anything at or below one of the given paths matches.
            if any(value == "/" for value in values):
                continue
            elif any(what.startswith(value + "/") for value in values):
                continue
            else:
                return False
    # True if no false match (or no filters at all)
    return True
def get_parameter(self, name, with_decryption):
    """Resolve *name* to a stored Parameter object.

    *name* may be a bare name (newest version wins), "name:version",
    or "name:label".  Returns None when nothing matches.
    """
    if name.count(":") > 1:
        return None
    base_name, separator, selector = name.partition(":")
    if base_name not in self._parameters:
        return None
    history = self._parameters[base_name]
    if not separator:
        # Bare name: the most recently stored version.
        return history[-1]
    if selector.isdigit():
        for candidate in reversed(history):
            if str(candidate.version) == selector:
                return candidate
    # Fall through to label matching (also when a numeric selector missed).
    for candidate in reversed(history):
        if selector in candidate.labels:
            return candidate
    return None
def label_parameter_version(self, name, version, labels):
    """Attach *labels* to one version of parameter *name*.

    A label may live on only a single version, so requested labels found
    on other versions are moved.  Returns ``[invalid_labels, version]``
    where *invalid_labels* lists labels that failed AWS's naming rules
    and *version* is the version actually labelled (latest by default).

    Raises:
        ParameterNotFound: *name* has no stored versions.
        ParameterVersionNotFound: the requested version does not exist.
        ValidationException: a label is longer than 100 characters.
        ParameterVersionLabelLimitExceeded: a version would exceed 10 labels.
    """
    previous_parameter_versions = self._parameters[name]
    if not previous_parameter_versions:
        raise ParameterNotFound("Parameter %s not found." % name)
    found_parameter = None
    labels_needing_removal = []
    # No explicit version means "the highest stored version".
    if not version:
        version = 1
        for parameter in previous_parameter_versions:
            if parameter.version >= version:
                version = parameter.version
    for parameter in previous_parameter_versions:
        if parameter.version == version:
            found_parameter = parameter
        else:
            # Remember requested labels currently on other versions so
            # they can be detached below.
            for label in labels:
                if label in parameter.labels:
                    labels_needing_removal.append(label)
    if not found_parameter:
        raise ParameterVersionNotFound(
            "Systems Manager could not find version %s of %s. "
            "Verify the version and try again." % (version, name)
        )
    labels_to_append = []
    invalid_labels = []
    for label in labels:
        # AWS label rules: no "aws"/"ssm" prefix, must not start with a
        # digit, only letters, digits, '.', '-' and '_' allowed.
        # (Fixed: the previous pattern used [a-zA-z], whose A-z range
        # also accepted '[', '\\', ']', '^' and '`'.)
        if (
            label.startswith("aws")
            or label.startswith("ssm")
            or label[:1].isdigit()
            or not re.match(r"^[a-zA-Z0-9_\.\-]*$", label)
        ):
            invalid_labels.append(label)
            continue
        if len(label) > 100:
            raise ValidationException(
                "1 validation error detected: "
                "Value '[%s]' at 'labels' failed to satisfy constraint: "
                "Member must satisfy constraint: "
                "[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]"
                % label
            )
        if label not in found_parameter.labels:
            labels_to_append.append(label)
    if (len(found_parameter.labels) + len(labels_to_append)) > 10:
        raise ParameterVersionLabelLimitExceeded(
            "An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: "
            "A parameter version can have maximum 10 labels."
            "Move one or more labels to another version and try again."
        )
    found_parameter.labels = found_parameter.labels + labels_to_append
    # Detach moved labels from every other version.
    for parameter in previous_parameter_versions:
        if parameter.version != version:
            for label in parameter.labels[:]:
                if label in labels_needing_removal:
                    parameter.labels.remove(label)
    return [invalid_labels, version]
def put_parameter(
    self, name, description, value, type, allowed_pattern, keyid, overwrite
):
    """Create a new version of parameter *name*.

    Returns the new version number, or None when the parameter already
    exists and ``overwrite`` is False.

    Raises:
        AccessDeniedException: path-style names under the reserved /aws tree.
        ValidationException: other names prefixed with "aws" or "ssm".
    """
    # "aws"/"ssm" prefixes (ignoring leading slashes) are reserved by AWS.
    if name.lower().lstrip("/").startswith("aws") or name.lower().lstrip(
        "/"
    ).startswith("ssm"):
        is_path = name.count("/") > 1
        if name.lower().startswith("/aws") and is_path:
            raise AccessDeniedException(
                "No access to reserved parameter name: {name}.".format(name=name)
            )
        if not is_path:
            invalid_prefix_error = 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).'
        else:
            invalid_prefix_error = (
                'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). '
                "If formed as a path, it can consist of sub-paths divided by slash symbol; each sub-path can be "
                "formed as a mix of letters, numbers and the following 3 symbols .-_"
            )
        raise ValidationException(invalid_prefix_error)
    previous_parameter_versions = self._parameters[name]
    if len(previous_parameter_versions) == 0:
        previous_parameter = None
        version = 1
    else:
        previous_parameter = previous_parameter_versions[-1]
        version = previous_parameter.version + 1
        # Only an existing parameter is protected by the overwrite flag;
        # returning None (no version) signals the refusal to the caller.
        if not overwrite:
            return
    last_modified_date = time.time()
    self._parameters[name].append(
        Parameter(
            name,
            value,
            type,
            description,
            allowed_pattern,
            keyid,
            last_modified_date,
            version,
        )
    )
    return version
def add_tags_to_resource(self, resource_type, resource_id, tags):
    """Merge *tags* into the tag map kept for one resource."""
    resource_tags = self._resource_tags[resource_type][resource_id]
    resource_tags.update(tags)
def remove_tags_from_resource(self, resource_type, resource_id, keys):
    """Drop the listed tag keys from one resource; unknown keys are ignored."""
    current = self._resource_tags[resource_type][resource_id]
    for key in keys:
        current.pop(key, None)
def list_tags_for_resource(self, resource_type, resource_id):
    """Return the (possibly empty) tag map for one resource."""
    return self._resource_tags[resource_type][resource_id]
def send_command(self, **kwargs):
    """Create a Command from SendCommand keyword arguments and record it."""
    arg = kwargs.get
    default_notification = {
        "NotificationArn": "string",
        "NotificationEvents": ["Success"],
        "NotificationType": "Command",
    }
    command = Command(
        comment=arg("Comment", ""),
        document_name=arg("DocumentName"),
        timeout_seconds=arg("TimeoutSeconds", 3600),
        instance_ids=arg("InstanceIds", []),
        max_concurrency=arg("MaxConcurrency", "50"),
        max_errors=arg("MaxErrors", "0"),
        notification_config=arg("NotificationConfig", default_notification),
        output_s3_bucket_name=arg("OutputS3BucketName", ""),
        output_s3_key_prefix=arg("OutputS3KeyPrefix", ""),
        output_s3_region=arg("OutputS3Region", ""),
        parameters=arg("Parameters", {}),
        service_role_arn=arg("ServiceRoleArn", ""),
        targets=arg("Targets", []),
        backend_region=self._region,
    )
    self._commands.append(command)
    return {"Command": command.response_object()}
def list_commands(self, **kwargs):
    """
    https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_ListCommands.html
    """
    selected = self._commands
    command_id = kwargs.get("CommandId", None)
    if command_id:
        selected = [self.get_command_by_id(command_id)]
    instance_id = kwargs.get("InstanceId", None)
    if instance_id:
        # An InstanceId filter takes precedence over CommandId.
        selected = self.get_commands_by_instance_id(instance_id)
    return {"Commands": [cmd.response_object() for cmd in selected]}
def get_command_by_id(self, id):
    """Look up a stored command by its uuid; raise InvalidCommandId on a miss."""
    for command in self._commands:
        if command.command_id == id:
            return command
    raise RESTError("InvalidCommandId", "Invalid command id.")
def get_commands_by_instance_id(self, instance_id):
    """All commands whose target list includes *instance_id*."""
    matching = []
    for command in self._commands:
        if instance_id in command.instance_ids:
            matching.append(command)
    return matching
def get_command_invocation(self, **kwargs):
    """
    https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html
    """
    command = self.get_command_by_id(kwargs.get("CommandId"))
    return command.get_invocation(
        kwargs.get("InstanceId"), kwargs.get("PluginName", None)
    )
# Pre-create one isolated in-memory backend per region across every AWS
# partition (the default partition plus GovCloud and China), replacing
# three copy-pasted loops with a single one.
ssm_backends = {}
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in Session().get_available_regions("ssm", partition_name=_partition):
        ssm_backends[region] = SimpleSystemManagerBackend(region)
import re
from boto3 import Session
from collections import defaultdict
from moto.core import ACCOUNT_ID, BaseBackend, BaseModel
from moto.core.exceptions import RESTError
from moto.cloudformation import cloudformation_backends
import datetime
import time
import uuid
import itertools
import json
import yaml
import hashlib
from .utils import parameter_arn
from .exceptions import (
ValidationException,
InvalidFilterValue,
InvalidFilterOption,
InvalidFilterKey,
ParameterVersionLabelLimitExceeded,
ParameterVersionNotFound,
ParameterNotFound,
DocumentAlreadyExists,
InvalidDocumentOperation,
AccessDeniedException,
InvalidDocument,
InvalidDocumentContent,
InvalidDocumentVersion,
DuplicateDocumentVersionName,
DuplicateDocumentContent,
)
class Parameter(BaseModel):
    """One stored version of an SSM parameter.

    SecureString values are "encrypted" by wrapping them in a visible
    ``kms:<keyid>:`` prefix so tests can observe and reverse it.
    """

    def __init__(
        self,
        name,
        value,
        type,
        description,
        allowed_pattern,
        keyid,
        last_modified_date,
        version,
    ):
        self.name = name
        self.type = type
        self.description = description
        self.allowed_pattern = allowed_pattern
        self.keyid = keyid
        self.last_modified_date = last_modified_date
        self.version = version
        self.labels = []
        if self.type != "SecureString":
            self.value = value
        else:
            # SecureStrings default to the account's SSM CMK alias.
            self.keyid = self.keyid or "alias/aws/ssm"
            self.value = self.encrypt(value)

    def encrypt(self, value):
        """Mock KMS encryption by prefixing the key id."""
        return "kms:{}:".format(self.keyid) + value

    def decrypt(self, value):
        """Strip the mock-encryption prefix; non-secure values pass through."""
        if self.type != "SecureString":
            return value
        prefix = "kms:{}:".format(self.keyid or "default")
        if value.startswith(prefix):
            return value[len(prefix) :]
        # NOTE(review): implicitly returns None when the prefix is absent,
        # matching the long-standing behaviour callers rely on.

    def response_object(self, decrypt=False, region=None):
        """Dict shape used by GetParameter(s) responses."""
        shown_value = self.decrypt(self.value) if decrypt else self.value
        response = {
            "Name": self.name,
            "Type": self.type,
            "Value": shown_value,
            "Version": self.version,
            "LastModifiedDate": round(self.last_modified_date, 3),
        }
        if region:
            response["ARN"] = parameter_arn(region, self.name)
        return response

    def describe_response_object(self, decrypt=False, include_labels=False):
        """Extended dict shape used by DescribeParameters responses."""
        response = self.response_object(decrypt)
        response["LastModifiedDate"] = round(self.last_modified_date, 3)
        response["LastModifiedUser"] = "N/A"
        if self.description:
            response["Description"] = self.description
        if self.keyid:
            response["KeyId"] = self.keyid
        if self.allowed_pattern:
            response["AllowedPattern"] = self.allowed_pattern
        if include_labels:
            response["Labels"] = self.labels
        return response
# Default TimeoutSeconds for a Command when the caller omits it.
MAX_TIMEOUT_SECONDS = 3600
def generate_ssm_doc_param_list(parameters):
    """Convert a document's ``parameters`` mapping into the list-of-dicts
    shape returned by DescribeDocument, or None when there are none."""
    if not parameters:
        return None
    json_default_types = ("StringList", "StringMap", "MapList")
    param_list = []
    for param_name, param_info in parameters.items():
        # Collection-typed defaults are serialised as JSON; everything
        # else is stringified.
        if param_info["type"] in json_default_types:
            default_value = json.dumps(param_info["default"])
        else:
            default_value = str(param_info["default"])
        param_list.append(
            {
                "Name": param_name,
                "Type": param_info["type"],
                "Description": param_info["description"],
                "DefaultValue": default_value,
            }
        )
    return param_list
class Document(BaseModel):
    """Parsed representation of an SSM document plus its metadata.

    Raises InvalidDocumentContent when the content cannot be parsed or is
    missing required keys, and ValidationException for unknown formats.
    """

    def __init__(
        self,
        name,
        version_name,
        content,
        document_type,
        document_format,
        requires,
        attachments,
        target_type,
        tags,
        document_version="1",
    ):
        self.name = name
        self.version_name = version_name
        self.content = content
        self.document_type = document_type
        self.document_format = document_format
        self.requires = requires
        self.attachments = attachments
        self.target_type = target_type
        self.tags = tags
        self.status = "Active"
        self.document_version = document_version
        self.owner = ACCOUNT_ID
        self.created_date = datetime.datetime.utcnow().strftime("%Y-%m-%d %H:%M:%S")
        if document_format == "JSON":
            try:
                content_json = json.loads(content)
            except ValueError:
                # json.JSONDecodeError subclasses ValueError on Python 3,
                # and Python 2's json raises plain ValueError, so one
                # handler covers both (the former separate
                # JSONDecodeError clause was unreachable).
                raise InvalidDocumentContent(
                    "The content for the document is not valid."
                )
        elif document_format == "YAML":
            try:
                content_json = yaml.safe_load(content)
            except yaml.YAMLError:
                raise InvalidDocumentContent(
                    "The content for the document is not valid."
                )
        else:
            raise ValidationException("Invalid document format " + str(document_format))
        self.content_json = content_json
        try:
            self.schema_version = str(content_json["schemaVersion"])
            self.description = content_json.get("description")
            self.outputs = content_json.get("outputs")
            self.files = content_json.get("files")
            # TODO add platformType (requires mapping the ssm actions to OS's this isn't well documented)
            self.platform_types = ["Not Implemented (moto)"]
            self.parameter_list = generate_ssm_doc_param_list(
                content_json.get("parameters")
            )
            # Steps live under different keys depending on schema version.
            if self.schema_version in ("0.3", "2.0", "2.2"):
                self.mainSteps = content_json["mainSteps"]
            elif self.schema_version == "1.2":
                self.runtimeConfig = content_json.get("runtimeConfig")
        except KeyError:
            raise InvalidDocumentContent("The content for the document is not valid.")
class Command(BaseModel):
    """A single SendCommand request plus its (mocked) per-instance results."""

    def __init__(
        self,
        comment="",
        document_name="",
        timeout_seconds=MAX_TIMEOUT_SECONDS,
        instance_ids=None,
        max_concurrency="",
        max_errors="",
        notification_config=None,
        output_s3_bucket_name="",
        output_s3_key_prefix="",
        output_s3_region="",
        parameters=None,
        service_role_arn="",
        targets=None,
        backend_region="us-east-1",
    ):
        # None sentinels avoid the mutable-default-argument trap.
        if instance_ids is None:
            instance_ids = []
        if notification_config is None:
            notification_config = {}
        if parameters is None:
            parameters = {}
        if targets is None:
            targets = []
        self.error_count = 0
        # The mock treats every instance as completed successfully at once.
        self.completed_count = len(instance_ids)
        self.target_count = len(instance_ids)
        self.command_id = str(uuid.uuid4())
        self.status = "Success"
        self.status_details = "Details placeholder"
        self.requested_date_time = datetime.datetime.now()
        self.requested_date_time_iso = self.requested_date_time.isoformat()
        expires_after = self.requested_date_time + datetime.timedelta(
            0, timeout_seconds
        )
        self.expires_after = expires_after.isoformat()
        self.comment = comment
        self.document_name = document_name
        self.instance_ids = instance_ids
        self.max_concurrency = max_concurrency
        self.max_errors = max_errors
        self.notification_config = notification_config
        self.output_s3_bucket_name = output_s3_bucket_name
        self.output_s3_key_prefix = output_s3_key_prefix
        self.output_s3_region = output_s3_region
        self.parameters = parameters
        self.service_role_arn = service_role_arn
        self.targets = targets
        self.backend_region = backend_region
        # Get instance ids from a cloud formation stack target.
        stack_instance_ids = [
            self.get_instance_ids_by_stack_ids(target["Values"])
            for target in self.targets
            if target["Key"] == "tag:aws:cloudformation:stack-name"
        ]
        self.instance_ids += list(itertools.chain.from_iterable(stack_instance_ids))
        # Create invocations with a single run command plugin.
        self.invocations = []
        for instance_id in self.instance_ids:
            self.invocations.append(
                self.invocation_response(instance_id, "aws:runShellScript")
            )

    def get_instance_ids_by_stack_ids(self, stack_ids):
        """Resolve EC2 instance ids owned by the given CloudFormation stacks
        via the region's mocked cloudformation backend."""
        instance_ids = []
        cloudformation_backend = cloudformation_backends[self.backend_region]
        for stack_id in stack_ids:
            stack_resources = cloudformation_backend.list_stack_resources(stack_id)
            instance_resources = [
                instance.id
                for instance in stack_resources
                if instance.type == "AWS::EC2::Instance"
            ]
            instance_ids.extend(instance_resources)
        return instance_ids

    def response_object(self):
        """Dict shape used by SendCommand and ListCommands responses."""
        r = {
            "CommandId": self.command_id,
            "Comment": self.comment,
            "CompletedCount": self.completed_count,
            "DocumentName": self.document_name,
            "ErrorCount": self.error_count,
            "ExpiresAfter": self.expires_after,
            "InstanceIds": self.instance_ids,
            "MaxConcurrency": self.max_concurrency,
            "MaxErrors": self.max_errors,
            "NotificationConfig": self.notification_config,
            "OutputS3Region": self.output_s3_region,
            "OutputS3BucketName": self.output_s3_bucket_name,
            "OutputS3KeyPrefix": self.output_s3_key_prefix,
            "Parameters": self.parameters,
            "RequestedDateTime": self.requested_date_time_iso,
            "ServiceRole": self.service_role_arn,
            "Status": self.status,
            "StatusDetails": self.status_details,
            "TargetCount": self.target_count,
            "Targets": self.targets,
        }
        return r

    def invocation_response(self, instance_id, plugin_name):
        """Build one fake successful invocation record for *instance_id*."""
        # Calculate elapsed time from requested time and now. Use a hardcoded
        # elapsed time since there is no easy way to convert a timedelta to
        # an ISO 8601 duration string.
        elapsed_time_iso = "PT5M"
        elapsed_time_delta = datetime.timedelta(minutes=5)
        end_time = self.requested_date_time + elapsed_time_delta
        r = {
            "CommandId": self.command_id,
            "InstanceId": instance_id,
            "Comment": self.comment,
            "DocumentName": self.document_name,
            "PluginName": plugin_name,
            "ResponseCode": 0,
            "ExecutionStartDateTime": self.requested_date_time_iso,
            "ExecutionElapsedTime": elapsed_time_iso,
            "ExecutionEndDateTime": end_time.isoformat(),
            "Status": "Success",
            "StatusDetails": "Success",
            "StandardOutputContent": "",
            "StandardOutputUrl": "",
            "StandardErrorContent": "",
        }
        return r

    def get_invocation(self, instance_id, plugin_name):
        """Find the invocation for *instance_id*, optionally filtered by
        plugin name; raise InvocationDoesNotExist on any mismatch."""
        invocation = next(
            (
                invocation
                for invocation in self.invocations
                if invocation["InstanceId"] == instance_id
            ),
            None,
        )
        if invocation is None:
            raise RESTError(
                "InvocationDoesNotExist",
                "An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation",
            )
        if plugin_name is not None and invocation["PluginName"] != plugin_name:
            raise RESTError(
                "InvocationDoesNotExist",
                "An error occurred (InvocationDoesNotExist) when calling the GetCommandInvocation operation",
            )
        return invocation
def _validate_document_format(document_format):
    """Reject any DocumentFormat other than the two AWS supports."""
    if document_format in ("JSON", "YAML"):
        return
    raise ValidationException("Invalid document format " + str(document_format))
def _validate_document_info(content, name, document_type, document_format, strict=True):
    """Validate the shared CreateDocument/UpdateDocument arguments.

    Raises ValidationException on the first problem found.  ``strict`` is
    False for UpdateDocument, which does not re-check the document type.
    """
    _validate_document_format(document_format)
    if not content:
        raise ValidationException("Content is required")
    # Names reserved by AWS cannot be used.
    if name.startswith(("aws-", "amazon", "amzn")):
        raise ValidationException("Invalid document name " + str(name))
    if not re.match(r"^[a-zA-Z0-9_\-.]{3,128}$", name):
        raise ValidationException("Invalid document name " + str(name))
    valid_doc_types = (
        "Command",
        "Policy",
        "Automation",
        "Session",
        "Package",
        "ApplicationConfiguration",
        "ApplicationConfigurationSchema",
        "DeploymentStrategy",
        "ChangeCalendar",
    )
    if strict and document_type not in valid_doc_types:
        # Update document doesn't use document type
        raise ValidationException("Invalid document type " + str(document_type))
def _document_filter_equal_comparator(keyed_value, filter):
    """True when *keyed_value* equals any of the filter's values."""
    return any(keyed_value == candidate for candidate in filter["Values"])
def _document_filter_list_includes_comparator(keyed_value_list, filter):
    """True when any of the filter's values occurs in *keyed_value_list*."""
    return any(candidate in keyed_value_list for candidate in filter["Values"])
def _document_filter_match(filters, ssm_doc):
    """Return True when *ssm_doc* satisfies every filter in *filters*."""
    for filter in filters:
        if filter["Key"] == "Name" and not _document_filter_equal_comparator(
            ssm_doc.name, filter
        ):
            return False
        elif filter["Key"] == "Owner":
            if len(filter["Values"]) != 1:
                raise ValidationException("Owner filter can only have one value.")
            if filter["Values"][0] == "Self":
                # Update to running account ID
                # NOTE(review): mutates the caller-supplied filter in place.
                filter["Values"][0] = ACCOUNT_ID
            if not _document_filter_equal_comparator(ssm_doc.owner, filter):
                return False
        elif filter[
            "Key"
        ] == "PlatformTypes" and not _document_filter_list_includes_comparator(
            ssm_doc.platform_types, filter
        ):
            return False
        elif filter["Key"] == "DocumentType" and not _document_filter_equal_comparator(
            ssm_doc.document_type, filter
        ):
            return False
        elif filter["Key"] == "TargetType" and not _document_filter_equal_comparator(
            ssm_doc.target_type, filter
        ):
            return False
    # Unrecognised filter keys are silently ignored.
    return True
class SimpleSystemManagerBackend(BaseBackend):
def __init__(self, region_name=None):
    """Create an empty per-region SSM backend."""
    super(SimpleSystemManagerBackend, self).__init__()
    # each value is a list of all of the versions for a parameter
    # to get the current value, grab the last item of the list
    self._parameters = defaultdict(list)
    # {resource_type: {resource_id: {tag_key: tag_value}}}
    self._resource_tags = defaultdict(lambda: defaultdict(dict))
    # Commands issued via send_command, in creation order.
    self._commands = []
    # Validation errors collected by _format_error and raised in a batch.
    self._errors = []
    # {name: {"documents": {version: Document},
    #         "default_version": str, "latest_version": str}}
    self._documents = defaultdict(dict)
    self._region = region_name
def reset(self):
    """Wipe all state but keep the backend bound to the same region."""
    region_name = self._region
    # Replace __dict__ wholesale so stray attributes are discarded too,
    # then rebuild the pristine state through __init__.
    self.__dict__ = {}
    self.__init__(region_name)
def _generate_document_description(self, document):
    """Build the DescribeDocument/CreateDocument response for *document*."""
    bundle = self._documents[document.name]
    description = {
        "Hash": hashlib.sha256(document.content.encode("utf-8")).hexdigest(),
        "HashType": "Sha256",
        "Name": document.name,
        "Owner": document.owner,
        "CreatedDate": document.created_date,
        "Status": document.status,
        "DocumentVersion": document.document_version,
        "Description": document.description,
        "Parameters": document.parameter_list,
        "PlatformTypes": document.platform_types,
        "DocumentType": document.document_type,
        "SchemaVersion": document.schema_version,
        "LatestVersion": bundle["latest_version"],
        "DefaultVersion": bundle["default_version"],
        "DocumentFormat": document.document_format,
    }
    # Optional fields are only present when set on the document.
    if document.version_name:
        description["VersionName"] = document.version_name
    if document.target_type:
        description["TargetType"] = document.target_type
    if document.tags:
        description["Tags"] = document.tags
    return description
def _generate_document_information(self, ssm_document, document_format):
    """Build the GetDocument response, re-serializing the parsed content
    into the requested format."""
    if document_format == "JSON":
        rendered_content = json.dumps(ssm_document.content_json)
    elif document_format == "YAML":
        rendered_content = yaml.dump(ssm_document.content_json)
    else:
        raise ValidationException("Invalid document format " + str(document_format))
    information = {
        "Name": ssm_document.name,
        "DocumentVersion": ssm_document.document_version,
        "Status": ssm_document.status,
        "Content": rendered_content,
        "DocumentType": ssm_document.document_type,
        "DocumentFormat": document_format,
    }
    if ssm_document.version_name:
        information["VersionName"] = ssm_document.version_name
    if ssm_document.requires:
        information["Requires"] = ssm_document.requires
    if ssm_document.attachments:
        information["AttachmentsContent"] = ssm_document.attachments
    return information
def _generate_document_list_information(self, ssm_document):
    """Summary dict for one document as returned by ListDocuments."""
    summary = {
        "Name": ssm_document.name,
        "Owner": ssm_document.owner,
        "DocumentVersion": ssm_document.document_version,
        "DocumentType": ssm_document.document_type,
        "SchemaVersion": ssm_document.schema_version,
        "DocumentFormat": ssm_document.document_format,
    }
    # Optional fields appear only when truthy, in this fixed order.
    optional_fields = (
        ("VersionName", ssm_document.version_name),
        ("PlatformTypes", ssm_document.platform_types),
        ("TargetType", ssm_document.target_type),
        ("Tags", ssm_document.tags),
        ("Requires", ssm_document.requires),
    )
    for response_key, value in optional_fields:
        if value:
            summary[response_key] = value
    return summary
def create_document(
    self,
    content,
    requires,
    attachments,
    name,
    version_name,
    document_type,
    document_format,
    target_type,
    tags,
):
    """Validate and store a brand-new document as its first version."""
    # Parsing the content happens inside Document(); invalid content
    # therefore fails before the name/type validation below.
    ssm_document = Document(
        name=name,
        version_name=version_name,
        content=content,
        document_type=document_type,
        document_format=document_format,
        requires=requires,
        attachments=attachments,
        target_type=target_type,
        tags=tags,
    )
    _validate_document_info(
        content=content,
        name=name,
        document_type=document_type,
        document_format=document_format,
    )
    if self._documents.get(ssm_document.name):
        raise DocumentAlreadyExists("The specified document already exists.")
    initial_version = ssm_document.document_version
    self._documents[ssm_document.name] = {
        "documents": {initial_version: ssm_document},
        "default_version": initial_version,
        "latest_version": initial_version,
    }
    return self._generate_document_description(ssm_document)
def delete_document(self, name, document_version, version_name, force):
    """Delete an entire document or one specific version of it.

    Raises InvalidDocumentOperation when deleting a still-shared schema
    without ``force`` or when targeting the default version, and
    InvalidDocument when nothing matches.
    """
    documents = self._documents.get(name, {}).get("documents", {})
    keys_to_delete = set()
    if documents:
        default_version = self._documents[name]["default_version"]
        if (
            documents[default_version].document_type
            == "ApplicationConfigurationSchema"
            and not force
        ):
            raise InvalidDocumentOperation(
                "You attempted to delete a document while it is still shared. "
                "You must stop sharing the document before you can delete it."
            )
        if document_version and document_version == default_version:
            raise InvalidDocumentOperation(
                "Default version of the document can't be deleted."
            )
        if document_version or version_name:
            # We delete only a specific version
            delete_doc = self._find_document(name, document_version, version_name)
            # we can't delete only the default version
            if (
                delete_doc
                and delete_doc.document_version == default_version
                and len(documents) != 1
            ):
                raise InvalidDocumentOperation(
                    "Default version of the document can't be deleted."
                )
            if delete_doc:
                keys_to_delete.add(delete_doc.document_version)
            else:
                raise InvalidDocument("The specified document does not exist.")
        else:
            # We are deleting all versions
            keys_to_delete = set(documents.keys())
        for key in keys_to_delete:
            del self._documents[name]["documents"][key]
        if len(self._documents[name]["documents"].keys()) == 0:
            # Last version gone: forget the document entirely.
            del self._documents[name]
        else:
            # Re-point "latest" at the highest remaining numeric version.
            old_latest = self._documents[name]["latest_version"]
            if old_latest not in self._documents[name]["documents"].keys():
                leftover_keys = self._documents[name]["documents"].keys()
                int_keys = []
                for key in leftover_keys:
                    int_keys.append(int(key))
                self._documents[name]["latest_version"] = str(sorted(int_keys)[-1])
    else:
        raise InvalidDocument("The specified document does not exist.")
def _find_document(
    self, name, document_version=None, version_name=None, strict=True
):
    """Locate one version of document *name*.

    Selection rules: the default version when neither selector is given;
    both selectors must match the same version when both are given;
    otherwise the first version matching either selector wins.  With
    ``strict``, a miss raises InvalidDocument instead of returning None.
    """
    if not self._documents.get(name):
        raise InvalidDocument("The specified document does not exist.")
    documents = self._documents[name]["documents"]
    ssm_document = None
    if not version_name and not document_version:
        # Retrieve default version
        default_version = self._documents[name]["default_version"]
        ssm_document = documents.get(default_version)
    elif version_name and document_version:
        for doc_version, document in documents.items():
            if (
                doc_version == document_version
                and document.version_name == version_name
            ):
                ssm_document = document
                break
    else:
        # Exactly one selector: whichever matches first wins.
        for doc_version, document in documents.items():
            if document_version and doc_version == document_version:
                ssm_document = document
                break
            if version_name and document.version_name == version_name:
                ssm_document = document
                break
    if strict and not ssm_document:
        raise InvalidDocument("The specified document does not exist.")
    return ssm_document
def get_document(self, name, document_version, version_name, document_format):
    """Return one document rendered in the requested (or stored) format."""
    ssm_document = self._find_document(name, document_version, version_name)
    if document_format:
        _validate_document_format(document_format=document_format)
    else:
        # No explicit format requested: use the stored one.
        document_format = ssm_document.document_format
    return self._generate_document_information(ssm_document, document_format)
def update_document_default_version(self, name, document_version):
    """Point *name*'s default at an existing version; describe the result."""
    ssm_document = self._find_document(name, document_version=document_version)
    self._documents[name]["default_version"] = document_version
    response = {
        "Name": ssm_document.name,
        "DefaultVersion": document_version,
    }
    if ssm_document.version_name:
        response["DefaultVersionName"] = ssm_document.version_name
    return response
def update_document(
    self,
    content,
    attachments,
    name,
    version_name,
    document_version,
    document_format,
    target_type,
):
    """Add a new version to an existing document.

    Only the latest version (or the literal "$LATEST") may be updated,
    and the new content must differ from every stored version.
    """
    _validate_document_info(
        content=content,
        name=name,
        document_type=None,
        document_format=document_format,
        strict=False,
    )
    if not self._documents.get(name):
        raise InvalidDocument("The specified document does not exist.")
    if (
        self._documents[name]["latest_version"] != document_version
        and document_version != "$LATEST"
    ):
        raise InvalidDocumentVersion(
            "The document version is not valid or does not exist."
        )
    # A version name must be unique across the document's versions.
    if version_name and self._find_document(
        name, version_name=version_name, strict=False
    ):
        raise DuplicateDocumentVersionName(
            "The specified version name is a duplicate."
        )
    old_ssm_document = self._find_document(name)
    # The new version inherits type/requires/tags from the default version.
    new_ssm_document = Document(
        name=name,
        version_name=version_name,
        content=content,
        document_type=old_ssm_document.document_type,
        document_format=document_format,
        requires=old_ssm_document.requires,
        attachments=attachments,
        target_type=target_type,
        tags=old_ssm_document.tags,
        document_version=str(int(self._documents[name]["latest_version"]) + 1),
    )
    # Reject content identical (as a raw string) to any stored version.
    for doc_version, document in self._documents[name]["documents"].items():
        if document.content == new_ssm_document.content:
            raise DuplicateDocumentContent(
                "The content of the association document matches another document. "
                "Change the content of the document and try again."
            )
    # Bump "latest" only after every validation has passed.
    self._documents[name]["latest_version"] = str(
        int(self._documents[name]["latest_version"]) + 1
    )
    self._documents[name]["documents"][
        new_ssm_document.document_version
    ] = new_ssm_document
    return self._generate_document_description(new_ssm_document)
def describe_document(self, name, document_version, version_name):
    """Describe a single document version."""
    found = self._find_document(name, document_version, version_name)
    return self._generate_document_description(found)
def list_documents(
    self, document_filter_list, filters, max_results=10, next_token="0"
):
    """Page through stored documents (default versions only).

    ``next_token`` is the number of (name-sorted) documents to skip;
    only the newer ``Filters`` API is supported.
    """
    if document_filter_list:
        raise ValidationException(
            "DocumentFilterList is deprecated. Instead use Filters."
        )
    next_token = int(next_token)
    results = []
    dummy_token_tracker = 0
    # Sort to maintain next token adjacency
    for document_name, document_bundle in sorted(self._documents.items()):
        if len(results) == max_results:
            # There's still more to go so we need a next token
            return results, str(next_token + len(results))
        # Skip the documents already delivered on previous pages.
        if dummy_token_tracker < next_token:
            dummy_token_tracker = dummy_token_tracker + 1
            continue
        default_version = document_bundle["default_version"]
        ssm_doc = self._documents[document_name]["documents"][default_version]
        if filters and not _document_filter_match(filters, ssm_doc):
            # If we have filters enabled, and we don't match them,
            continue
        else:
            results.append(self._generate_document_list_information(ssm_doc))
    # If we've fallen out of the loop, theres no more documents. No next token.
    return results, ""
def delete_parameter(self, name):
    """Remove *name* entirely; return its version history, or None."""
    try:
        return self._parameters.pop(name)
    except KeyError:
        return None
def delete_parameters(self, names):
    """Delete each named parameter; return the names actually removed."""
    deleted = []
    for name in names:
        if name in self._parameters:
            del self._parameters[name]
            deleted.append(name)
    return deleted
def describe_parameters(self, filters, parameter_filters):
    """Implement DescribeParameters with either the legacy ``filters``
    list or the newer ``parameter_filters`` (mutually exclusive)."""
    if filters and parameter_filters:
        raise ValidationException(
            "You can use either Filters or ParameterFilters in a single request."
        )
    self._validate_parameter_filters(parameter_filters, by_path=False)
    result = []
    for param_name in self._parameters:
        ssm_parameter = self.get_parameter(param_name, False)
        if not self._match_filters(ssm_parameter, parameter_filters):
            continue
        if filters:
            # Legacy filters: append the parameter on the first matching
            # filter, then move on to the next parameter (via continue).
            for filter in filters:
                if filter["Key"] == "Name":
                    k = ssm_parameter.name
                    for v in filter["Values"]:
                        if k.startswith(v):
                            result.append(ssm_parameter)
                            break
                elif filter["Key"] == "Type":
                    k = ssm_parameter.type
                    for v in filter["Values"]:
                        if k == v:
                            result.append(ssm_parameter)
                            break
                elif filter["Key"] == "KeyId":
                    k = ssm_parameter.keyid
                    if k:
                        for v in filter["Values"]:
                            if k == v:
                                result.append(ssm_parameter)
                                break
            continue
        result.append(ssm_parameter)
    return result
    def _validate_parameter_filters(self, parameter_filters, by_path):
        """Validate filter objects for DescribeParameters / GetParametersByPath.

        Pass 1 collects structural violations (key pattern, lengths) into
        ``self._errors`` and raises them together via ``_raise_errors()``.
        Pass 2 enforces semantic rules and raises on the first bad filter.
        """
        # ---- pass 1: structural validation; violations are accumulated ----
        for index, filter_obj in enumerate(parameter_filters or []):
            key = filter_obj["Key"]
            values = filter_obj.get("Values", [])

            if key == "Path":
                option = filter_obj.get("Option", "OneLevel")
            else:
                option = filter_obj.get("Option", "Equals")

            # NOTE(review): '^' anchors only the first alternative and '$'
            # only the last, so keys like "NameX" pass this pattern - confirm
            # whether full anchoring (a non-capturing group) is intended.
            if not re.match(r"^tag:.+|Name|Type|KeyId|Path|Label|Tier$", key):
                self._errors.append(
                    self._format_error(
                        key="parameterFilters.{index}.member.key".format(
                            index=(index + 1)
                        ),
                        value=key,
                        constraint="Member must satisfy regular expression pattern: tag:.+|Name|Type|KeyId|Path|Label|Tier",
                    )
                )

            if len(key) > 132:
                self._errors.append(
                    self._format_error(
                        key="parameterFilters.{index}.member.key".format(
                            index=(index + 1)
                        ),
                        value=key,
                        constraint="Member must have length less than or equal to 132",
                    )
                )

            if len(option) > 10:
                self._errors.append(
                    self._format_error(
                        key="parameterFilters.{index}.member.option".format(
                            index=(index + 1)
                        ),
                        value="over 10 chars",
                        constraint="Member must have length less than or equal to 10",
                    )
                )

            if len(values) > 50:
                self._errors.append(
                    self._format_error(
                        key="parameterFilters.{index}.member.values".format(
                            index=(index + 1)
                        ),
                        value=values,
                        constraint="Member must have length less than or equal to 50",
                    )
                )

            if any(len(value) > 1024 for value in values):
                self._errors.append(
                    self._format_error(
                        key="parameterFilters.{index}.member.values".format(
                            index=(index + 1)
                        ),
                        value=values,
                        constraint="[Member must have length less than or equal to 1024, Member must have length greater than or equal to 1]",
                    )
                )

        self._raise_errors()

        # ---- pass 2: semantic validation; first violation raises ----
        filter_keys = []
        for filter_obj in parameter_filters or []:
            key = filter_obj["Key"]
            values = filter_obj.get("Values")

            if key == "Path":
                option = filter_obj.get("Option", "OneLevel")
            else:
                option = filter_obj.get("Option", "Equals")

            if not by_path and key == "Label":
                raise InvalidFilterKey(
                    "The following filter key is not valid: Label. Valid filter keys include: [Path, Name, Type, KeyId, Tier]."
                )

            if by_path and key in ["Name", "Path", "Tier"]:
                raise InvalidFilterKey(
                    "The following filter key is not valid: {key}. Valid filter keys include: [Type, KeyId].".format(
                        key=key
                    )
                )

            if not values:
                raise InvalidFilterValue(
                    "The following filter values are missing : null for filter key Name."
                )

            # Duplicate filter keys are rejected (the message always says
            # "Name", matching the string used here regardless of the key).
            if key in filter_keys:
                raise InvalidFilterKey(
                    "The following filter is duplicated in the request: Name. A request can contain only one occurrence of a specific filter."
                )

            if key == "Path":
                if option not in ["Recursive", "OneLevel"]:
                    raise InvalidFilterOption(
                        "The following filter option is not valid: {option}. Valid options include: [Recursive, OneLevel].".format(
                            option=option
                        )
                    )
                # Reserved namespaces cannot be filtered on.
                if any(value.lower().startswith(("/aws", "/ssm")) for value in values):
                    raise ValidationException(
                        'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
                        "When using global parameters, please specify within a global namespace."
                    )
                for value in values:
                    # NOTE(review): this per-value prefix check repeats the
                    # any(...) check above and can never fire - confirm.
                    if value.lower().startswith(("/aws", "/ssm")):
                        raise ValidationException(
                            'Filters for common parameters can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
                            "When using global parameters, please specify within a global namespace."
                        )
                    # NOTE(review): inside the character class ``.-/`` forms a
                    # range (which happens to cover '.', '/'); probably meant
                    # as three literal characters - confirm.
                    if (
                        "//" in value
                        or not value.startswith("/")
                        or not re.match("^[a-zA-Z0-9_.-/]*$", value)
                    ):
                        raise ValidationException(
                            'The parameter doesn\'t meet the parameter name requirements. The parameter name must begin with a forward slash "/". '
                            'It can\'t be prefixed with "aws" or "ssm" (case-insensitive). '
                            "It must use only letters, numbers, or the following symbols: . (period), - (hyphen), _ (underscore). "
                            'Special characters are not allowed. All sub-paths, if specified, must use the forward slash symbol "/". '
                            "Valid example: /get/parameters2-/by1./path0_."
                        )

            if key == "Tier":
                for value in values:
                    if value not in ["Standard", "Advanced", "Intelligent-Tiering"]:
                        raise InvalidFilterOption(
                            "The following filter value is not valid: {value}. Valid values include: [Standard, Advanced, Intelligent-Tiering].".format(
                                value=value
                            )
                        )

            if key == "Type":
                for value in values:
                    if value not in ["String", "StringList", "SecureString"]:
                        raise InvalidFilterOption(
                            "The following filter value is not valid: {value}. Valid values include: [String, StringList, SecureString].".format(
                                value=value
                            )
                        )

            allowed_options = ["Equals", "BeginsWith"]
            if key == "Name":
                allowed_options += ["Contains"]
            if key != "Path" and option not in allowed_options:
                raise InvalidFilterOption(
                    "The following filter option is not valid: {option}. Valid options include: [BeginsWith, Equals].".format(
                        option=option
                    )
                )

            filter_keys.append(key)
def _format_error(self, key, value, constraint):
return 'Value "{value}" at "{key}" failed to satisfy constraint: {constraint}'.format(
constraint=constraint, key=key, value=value
)
def _raise_errors(self):
if self._errors:
count = len(self._errors)
plural = "s" if len(self._errors) > 1 else ""
errors = "; ".join(self._errors)
self._errors = [] # reset collected errors
raise ValidationException(
"{count} validation error{plural} detected: {errors}".format(
count=count, plural=plural, errors=errors
)
)
def get_all_parameters(self):
result = []
for k, _ in self._parameters.items():
result.append(self._parameters[k])
return result
def get_parameters(self, names, with_decryption):
result = []
if len(names) > 10:
raise ValidationException(
"1 validation error detected: "
"Value '[{}]' at 'names' failed to satisfy constraint: "
"Member must have length less than or equal to 10.".format(
", ".join(names)
)
)
for name in names:
if name in self._parameters:
result.append(self.get_parameter(name, with_decryption))
return result
    def get_parameters_by_path(
        self,
        path,
        with_decryption,
        recursive,
        filters=None,
        next_token=None,
        max_results=10,
    ):
        """Implement the get-parameters-by-path-API in the backend.

        Returns a ``(values, next_token)`` pair via _get_values_nexttoken.
        """
        self._validate_parameter_filters(filters, by_path=True)

        result = []
        # path could be with or without a trailing /. we handle this
        # difference here.
        path = path.rstrip("/") + "/"
        for param_name in self._parameters:
            if path != "/" and not param_name.startswith(path):
                continue
            # Skip deeper hierarchy levels unless recursive was requested.
            # NOTE(review): ``len(path) + 1`` skips one character PAST the
            # trailing slash, so a parameter one character below the path may
            # be misclassified as a single level - confirm against AWS.
            if "/" in param_name[len(path) + 1 :] and not recursive:
                continue
            if not self._match_filters(
                self.get_parameter(param_name, with_decryption), filters
            ):
                continue
            result.append(self.get_parameter(param_name, with_decryption))

        return self._get_values_nexttoken(result, max_results, next_token)
def _get_values_nexttoken(self, values_list, max_results, next_token=None):
if next_token is None:
next_token = 0
next_token = int(next_token)
max_results = int(max_results)
values = values_list[next_token : next_token + max_results]
if len(values) == max_results:
next_token = str(next_token + max_results)
else:
next_token = None
return values, next_token
def get_parameter_history(self, name, with_decryption):
if name in self._parameters:
return self._parameters[name]
return None
    def _match_filters(self, parameter, filters=None):
        """Return True if the given parameter matches all the filters"""
        for filter_obj in filters or []:
            key = filter_obj["Key"]
            values = filter_obj.get("Values", [])

            if key == "Path":
                option = filter_obj.get("Option", "OneLevel")
            else:
                option = filter_obj.get("Option", "Equals")

            # Pick the attribute the filter compares against; Name/Path
            # comparisons are normalised to a single leading slash.
            what = None
            if key == "KeyId":
                what = parameter.keyid
            elif key == "Name":
                what = "/" + parameter.name.lstrip("/")
                if option != "Contains":
                    values = ["/" + value.lstrip("/") for value in values]
            elif key == "Path":
                what = "/" + parameter.name.lstrip("/")
                values = ["/" + value.strip("/") for value in values]
            elif key == "Type":
                what = parameter.type

            if what is None:
                return False
            elif option == "BeginsWith" and not any(
                what.startswith(value) for value in values
            ):
                return False
            elif option == "Contains" and not any(value in what for value in values):
                return False
            elif option == "Equals" and not any(what == value for value in values):
                return False
            elif option == "OneLevel":
                # Direct children only: "/" matches one-segment names;
                # otherwise the value must be the immediate parent.
                if any(value == "/" and len(what.split("/")) == 2 for value in values):
                    continue
                elif any(
                    value != "/"
                    and what.startswith(value + "/")
                    and len(what.split("/")) - 1 == len(value.split("/"))
                    for value in values
                ):
                    continue
                else:
                    return False
            elif option == "Recursive":
                # "/" matches everything; otherwise anything below the prefix.
                if any(value == "/" for value in values):
                    continue
                elif any(what.startswith(value + "/") for value in values):
                    continue
                else:
                    return False
        # True if no false match (or no filters at all)
        return True
def get_parameter(self, name, with_decryption):
name_parts = name.split(":")
name_prefix = name_parts[0]
if len(name_parts) > 2:
return None
if name_prefix in self._parameters:
if len(name_parts) == 1:
return self._parameters[name][-1]
if len(name_parts) == 2:
version_or_label = name_parts[1]
parameters = self._parameters[name_prefix]
if version_or_label.isdigit():
result = list(
filter(lambda x: str(x.version) == version_or_label, parameters)
)
if len(result) > 0:
return result[-1]
result = list(
filter(lambda x: version_or_label in x.labels, parameters)
)
if len(result) > 0:
return result[-1]
return None
def label_parameter_version(self, name, version, labels):
previous_parameter_versions = self._parameters[name]
if not previous_parameter_versions:
raise ParameterNotFound("Parameter %s not found." % name)
found_parameter = None
labels_needing_removal = []
if not version:
version = 1
for parameter in previous_parameter_versions:
if parameter.version >= version:
version = parameter.version
for parameter in previous_parameter_versions:
if parameter.version == version:
found_parameter = parameter
else:
for label in labels:
if label in parameter.labels:
labels_needing_removal.append(label)
if not found_parameter:
raise ParameterVersionNotFound(
"Systems Manager could not find version %s of %s. "
"Verify the version and try again." % (version, name)
)
labels_to_append = []
invalid_labels = []
for label in labels:
if (
label.startswith("aws")
or label.startswith("ssm")
or label[:1].isdigit()
or not re.match(r"^[a-zA-z0-9_\.\-]*$", label)
):
invalid_labels.append(label)
continue
if len(label) > 100:
raise ValidationException(
"1 validation error detected: "
"Value '[%s]' at 'labels' failed to satisfy constraint: "
"Member must satisfy constraint: "
"[Member must have length less than or equal to 100, Member must have length greater than or equal to 1]"
% label
)
continue
if label not in found_parameter.labels:
labels_to_append.append(label)
if (len(found_parameter.labels) + len(labels_to_append)) > 10:
raise ParameterVersionLabelLimitExceeded(
"An error occurred (ParameterVersionLabelLimitExceeded) when calling the LabelParameterVersion operation: "
"A parameter version can have maximum 10 labels."
"Move one or more labels to another version and try again."
)
found_parameter.labels = found_parameter.labels + labels_to_append
for parameter in previous_parameter_versions:
if parameter.version != version:
for label in parameter.labels[:]:
if label in labels_needing_removal:
parameter.labels.remove(label)
return [invalid_labels, version]
def put_parameter(
self, name, description, value, type, allowed_pattern, keyid, overwrite
):
if name.lower().lstrip("/").startswith("aws") or name.lower().lstrip(
"/"
).startswith("ssm"):
is_path = name.count("/") > 1
if name.lower().startswith("/aws") and is_path:
raise AccessDeniedException(
"No access to reserved parameter name: {name}.".format(name=name)
)
if not is_path:
invalid_prefix_error = 'Parameter name: can\'t be prefixed with "aws" or "ssm" (case-insensitive).'
else:
invalid_prefix_error = (
'Parameter name: can\'t be prefixed with "ssm" (case-insensitive). '
"If formed as a path, it can consist of sub-paths divided by slash symbol; each sub-path can be "
"formed as a mix of letters, numbers and the following 3 symbols .-_"
)
raise ValidationException(invalid_prefix_error)
previous_parameter_versions = self._parameters[name]
if len(previous_parameter_versions) == 0:
previous_parameter = None
version = 1
else:
previous_parameter = previous_parameter_versions[-1]
version = previous_parameter.version + 1
if not overwrite:
return
last_modified_date = time.time()
self._parameters[name].append(
Parameter(
name,
value,
type,
description,
allowed_pattern,
keyid,
last_modified_date,
version,
)
)
return version
def add_tags_to_resource(self, resource_type, resource_id, tags):
for key, value in tags.items():
self._resource_tags[resource_type][resource_id][key] = value
def remove_tags_from_resource(self, resource_type, resource_id, keys):
tags = self._resource_tags[resource_type][resource_id]
for key in keys:
if key in tags:
del tags[key]
def list_tags_for_resource(self, resource_type, resource_id):
return self._resource_tags[resource_type][resource_id]
    def send_command(self, **kwargs):
        """Create a Command from SendCommand kwargs, register it on this
        backend and return it in API response shape.

        Missing request fields fall back to the defaults below.
        """
        command = Command(
            comment=kwargs.get("Comment", ""),
            document_name=kwargs.get("DocumentName"),
            timeout_seconds=kwargs.get("TimeoutSeconds", 3600),
            instance_ids=kwargs.get("InstanceIds", []),
            max_concurrency=kwargs.get("MaxConcurrency", "50"),
            max_errors=kwargs.get("MaxErrors", "0"),
            notification_config=kwargs.get(
                "NotificationConfig",
                {
                    "NotificationArn": "string",
                    "NotificationEvents": ["Success"],
                    "NotificationType": "Command",
                },
            ),
            output_s3_bucket_name=kwargs.get("OutputS3BucketName", ""),
            output_s3_key_prefix=kwargs.get("OutputS3KeyPrefix", ""),
            output_s3_region=kwargs.get("OutputS3Region", ""),
            parameters=kwargs.get("Parameters", {}),
            service_role_arn=kwargs.get("ServiceRoleArn", ""),
            targets=kwargs.get("Targets", []),
            backend_region=self._region,
        )
        self._commands.append(command)
        return {"Command": command.response_object()}
def list_commands(self, **kwargs):
"""
https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_ListCommands.html
"""
commands = self._commands
command_id = kwargs.get("CommandId", None)
if command_id:
commands = [self.get_command_by_id(command_id)]
instance_id = kwargs.get("InstanceId", None)
if instance_id:
commands = self.get_commands_by_instance_id(instance_id)
return {"Commands": [command.response_object() for command in commands]}
def get_command_by_id(self, id):
command = next(
(command for command in self._commands if command.command_id == id), None
)
if command is None:
raise RESTError("InvalidCommandId", "Invalid command id.")
return command
def get_commands_by_instance_id(self, instance_id):
return [
command for command in self._commands if instance_id in command.instance_ids
]
def get_command_invocation(self, **kwargs):
"""
https://docs.aws.amazon.com/systems-manager/latest/APIReference/API_GetCommandInvocation.html
"""
command_id = kwargs.get("CommandId")
instance_id = kwargs.get("InstanceId")
plugin_name = kwargs.get("PluginName", None)
command = self.get_command_by_id(command_id)
return command.get_invocation(instance_id, plugin_name)
# One backend per region across the standard, GovCloud and China partitions.
# A single boto Session suffices to enumerate all of them; the previous code
# built a fresh Session per partition.  (Stray table residue on the last line
# of the old block was dropped.)
ssm_backends = {}
_boto_session = Session()
for _partition in ("aws", "aws-us-gov", "aws-cn"):
    for region in _boto_session.get_available_regions(
        "ssm", partition_name=_partition
    ):
        ssm_backends[region] = SimpleSystemManagerBackend(region)
import cv2
import numpy as np
import os
"""
์ด ์คํฌ๋ฆฝํธ๋ ๊ธฐ๋ณธ์ ์ผ๋ก ์์งํ ์๋ณธ ๋ฐ์ดํฐ๋ฅผ ๊ธฐ๋ฐ์ผ๋ก ๋ฐ์ดํฐ ์ฆ๊ฐ ์์
์ ์ํํ์ฌ
๋ฅ๋ฌ๋ ๋ชจ๋ธ์ด ํ์ต ํ ์ ์๋ ๋ฐ์ดํฐ์
์ ๋๋ฆฌ๊ธฐ ์ํ ์คํฌ๋ฆฝํธ์ด๋ค.
openCV ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ง์ ์ฌ์ฉํ์ฌ ์ด๋ฏธ์ง ์์ ๋ฐ ์ ์ฅ์ ํ์์ผ๋, ๋์ค์ ์ฐพ์๋ณด๋
Keras ์์๋ ImageGenerator๋ฅผ ์ฌ์ฉํด์ ์ ์ฌํ ์์
์ ํ ์ ์๋ค๊ณ ํ๋ค... ใ
"""
TS_PATH = "C:\\Users\\dry8r3ad\\PycharmProjects\\catBreedClassifier\\data\\"
TS_ORIG_PATH = TS_PATH + "original\\"
TS_MODI_PATH = TS_PATH + "argumentation\\"
original_file_list = os.listdir(TS_ORIG_PATH)
blur_ks_list = [3, 5]
gaussian_std_list = [5, 10, 15, 20]
translation_list = [5, 10]
rotation_list = [-10, -5, 5, 10]
def blur_img(img, blur_ks):
    """Return *img* box-blurred with a square kernel of size *blur_ks*."""
    kernel = (blur_ks, blur_ks)
    return cv2.blur(img, kernel)
def gaussian_img(img, std):
    """Return *img* with zero-mean Gaussian noise (stddev *std*) added.

    The noisy result is clipped to [0, 255] and cast back to the input
    dtype; the previous implementation returned an unclipped float64 array,
    which cv2.imwrite does not store correctly.  It also works for any
    image shape, not just 3-channel arrays.
    """
    noise = np.random.normal(0, std, img.shape)
    noisy = img.astype(np.float64) + noise
    return np.clip(noisy, 0, 255).astype(img.dtype)
def translate_img(img, trans):
    """Shift *img* by *trans* pixels along both axes."""
    rows, cols = img.shape[:2]
    matrix = np.float32([[1, 0, trans], [0, 1, trans]])
    return cv2.warpAffine(img, matrix, (cols, rows))
def rotate_img(img, rotate):
    """Rotate *img* by *rotate* degrees about its centre, without scaling."""
    rows, cols = img.shape[:2]
    matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate, 1)
    return cv2.warpAffine(img, matrix, (cols, rows))
def data_arg(orig_file, argumentation_dir_path, original_dir_path):
    """Generate augmented variants of one source image.

    For every (blur, noise, translation, rotation) combination the working
    image is transformed and written to
    ``argumentation_dir_path/<orig_file>/<idx>.jpg``.
    """
    idx = 0
    print(original_dir_path + orig_file)
    # Skip anything OpenCV cannot decode (e.g. stray non-image files).
    if not cv2.haveImageReader(original_dir_path + orig_file):
        print("Invalid file(non-processable) was entered (filename:" + orig_file + "). Skipping")
        return
    img = cv2.imread(original_dir_path + orig_file)
    working_path = argumentation_dir_path + orig_file
    if not os.path.exists(working_path):
        os.mkdir(working_path)
    # NOTE(review): transforms accumulate on ``img`` across iterations
    # (noise is added on top of noise, rotations compound, ...) because the
    # image is never reloaded inside the loops - confirm this compounding is
    # intentional.
    for blur_ks in blur_ks_list:
        img = blur_img(img, blur_ks)
        for gaussian_std in gaussian_std_list:
            img = gaussian_img(img, gaussian_std)
            for trans in translation_list:
                img = translate_img(img, trans)
                for rotate in rotation_list:
                    img = rotate_img(img, rotate)
                    filename = str(idx) + ".jpg"
                    cv2.imwrite(os.path.join(working_path, filename), img)
                    idx += 1
    # NOTE(review): this final reload is discarded on return; it looks like
    # it was meant to reset ``img`` inside one of the loops above - confirm.
    img = cv2.imread(original_dir_path + orig_file)
    return
def check_exclude_breed(breed):
    """Return True when *breed* should be skipped.

    A breed is skipped when it was already processed (``done``) or is
    deliberately excluded (``excluded``).  Sets give O(1) membership tests
    and the two equivalent checks collapse into one boolean expression.
    """
    done = {
        "Abyssinian", "American Bobtail", "American Curl", "American Shorthair",
        "American Wirehair", "Applehead Siamese", "Balinese", "Bengal", "Birman",
        "Bombay", "British Shorthair", "Burmese", "Burmilla", "Calico",
        "Canadian Hairless", "Chartreux", "Chausie", "Chinchilla", "Cornish Rex",
        "Cymric", "Devon Rex", "Dilute Calico", "Dilute Tortoiseshell",
        "Domestic Long Hair", "Domestic Medium Hair", "Domestic Short Hair",
        "Egyptian Mau", "Exotic Shorthair", "Extra-Toes Cat - Hemingway Polydactyl",
        "Havana", "Himalayan", "Japanese Bobtail", "Javanese", "Korat", "LaPerm",
        "Maine Coon", "Manx", "Munchkin", "Nebelung", "Norwegian Forest Cat",
        "Ocicat", "Oriental Long Hair", "Oriental Short Hair", "Oriental Tabby",
        "Persian", "Pixiebob", "Ragamuffin", "Ragdoll", "Russian Blue",
        "Scottish Fold", "Selkirk Rex", "Siamese", "Siberian", "Silver",
        "Singapura", "Snowshoe", "Somali", "Sphynx - Hairless Cat", "Tabby",
    }
    excluded = {
        "York Chocolate", "Chinchilla", "Canadian Hairless", "Burmilla", "LaPerm",
        "Cymric", "American Wirehair", "Singapura", "Chausie", "Javanese",
        "Somali", "Oriental Long Hair", "Korat", "Selkirk Rex", "Chartreux",
        "Silver", "Domestic Long Hair", "Domestic Medium Hair",
        "Domestic Short Hair",
    }
    return breed in done or breed in excluded
def main():
    """Walk each breed folder under the originals and augment every image."""
    original_file_list.sort()
    for breed in original_file_list:
        if check_exclude_breed(breed):
            continue
        print("Data Argumentation: " + breed)
        out_dir = TS_MODI_PATH + breed
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        src_dir = TS_ORIG_PATH + breed
        for img_file in os.listdir(src_dir):
            data_arg(img_file, out_dir + "\\", src_dir + "\\")


if __name__ == "__main__":
    main()

import cv2  # start of a second, duplicated copy of this script (dataset artifact)
import numpy as np
import os
"""
์ด ์คํฌ๋ฆฝํธ๋ ๊ธฐ๋ณธ์ ์ผ๋ก ์์งํ ์๋ณธ ๋ฐ์ดํฐ๋ฅผ ๊ธฐ๋ฐ์ผ๋ก ๋ฐ์ดํฐ ์ฆ๊ฐ ์์
์ ์ํํ์ฌ
๋ฅ๋ฌ๋ ๋ชจ๋ธ์ด ํ์ต ํ ์ ์๋ ๋ฐ์ดํฐ์
์ ๋๋ฆฌ๊ธฐ ์ํ ์คํฌ๋ฆฝํธ์ด๋ค.
openCV ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ฅผ ์ง์ ์ฌ์ฉํ์ฌ ์ด๋ฏธ์ง ์์ ๋ฐ ์ ์ฅ์ ํ์์ผ๋, ๋์ค์ ์ฐพ์๋ณด๋
Keras ์์๋ ImageGenerator๋ฅผ ์ฌ์ฉํด์ ์ ์ฌํ ์์
์ ํ ์ ์๋ค๊ณ ํ๋ค... ใ
"""
TS_PATH = "C:\\Users\\dry8r3ad\\PycharmProjects\\catBreedClassifier\\data\\"
TS_ORIG_PATH = TS_PATH + "original\\"
TS_MODI_PATH = TS_PATH + "argumentation\\"
original_file_list = os.listdir(TS_ORIG_PATH)
blur_ks_list = [3, 5]
gaussian_std_list = [5, 10, 15, 20]
translation_list = [5, 10]
rotation_list = [-10, -5, 5, 10]
def blur_img(img, blur_ks):
    """Return *img* box-blurred with a square kernel of size *blur_ks*."""
    kernel = (blur_ks, blur_ks)
    return cv2.blur(img, kernel)
def gaussian_img(img, std):
    """Return *img* with zero-mean Gaussian noise (stddev *std*) added.

    The noisy result is clipped to [0, 255] and cast back to the input
    dtype; the previous implementation returned an unclipped float64 array,
    which cv2.imwrite does not store correctly.  It also works for any
    image shape, not just 3-channel arrays.
    """
    noise = np.random.normal(0, std, img.shape)
    noisy = img.astype(np.float64) + noise
    return np.clip(noisy, 0, 255).astype(img.dtype)
def translate_img(img, trans):
    """Shift *img* by *trans* pixels along both axes."""
    rows, cols = img.shape[:2]
    matrix = np.float32([[1, 0, trans], [0, 1, trans]])
    return cv2.warpAffine(img, matrix, (cols, rows))
def rotate_img(img, rotate):
    """Rotate *img* by *rotate* degrees about its centre, without scaling."""
    rows, cols = img.shape[:2]
    matrix = cv2.getRotationMatrix2D((cols / 2, rows / 2), rotate, 1)
    return cv2.warpAffine(img, matrix, (cols, rows))
def data_arg(orig_file, argumentation_dir_path, original_dir_path):
    """Generate augmented variants of one source image.

    For every (blur, noise, translation, rotation) combination the working
    image is transformed and written to
    ``argumentation_dir_path/<orig_file>/<idx>.jpg``.
    """
    idx = 0
    print(original_dir_path + orig_file)
    # Skip anything OpenCV cannot decode (e.g. stray non-image files).
    if not cv2.haveImageReader(original_dir_path + orig_file):
        print("Invalid file(non-processable) was entered (filename:" + orig_file + "). Skipping")
        return
    img = cv2.imread(original_dir_path + orig_file)
    working_path = argumentation_dir_path + orig_file
    if not os.path.exists(working_path):
        os.mkdir(working_path)
    # NOTE(review): transforms accumulate on ``img`` across iterations
    # (noise is added on top of noise, rotations compound, ...) because the
    # image is never reloaded inside the loops - confirm this compounding is
    # intentional.
    for blur_ks in blur_ks_list:
        img = blur_img(img, blur_ks)
        for gaussian_std in gaussian_std_list:
            img = gaussian_img(img, gaussian_std)
            for trans in translation_list:
                img = translate_img(img, trans)
                for rotate in rotation_list:
                    img = rotate_img(img, rotate)
                    filename = str(idx) + ".jpg"
                    cv2.imwrite(os.path.join(working_path, filename), img)
                    idx += 1
    # NOTE(review): this final reload is discarded on return; it looks like
    # it was meant to reset ``img`` inside one of the loops above - confirm.
    img = cv2.imread(original_dir_path + orig_file)
    return
def check_exclude_breed(breed):
    """Return True when *breed* should be skipped.

    A breed is skipped when it was already processed (``done``) or is
    deliberately excluded (``excluded``).  Sets give O(1) membership tests
    and the two equivalent checks collapse into one boolean expression.
    """
    done = {
        "Abyssinian", "American Bobtail", "American Curl", "American Shorthair",
        "American Wirehair", "Applehead Siamese", "Balinese", "Bengal", "Birman",
        "Bombay", "British Shorthair", "Burmese", "Burmilla", "Calico",
        "Canadian Hairless", "Chartreux", "Chausie", "Chinchilla", "Cornish Rex",
        "Cymric", "Devon Rex", "Dilute Calico", "Dilute Tortoiseshell",
        "Domestic Long Hair", "Domestic Medium Hair", "Domestic Short Hair",
        "Egyptian Mau", "Exotic Shorthair", "Extra-Toes Cat - Hemingway Polydactyl",
        "Havana", "Himalayan", "Japanese Bobtail", "Javanese", "Korat", "LaPerm",
        "Maine Coon", "Manx", "Munchkin", "Nebelung", "Norwegian Forest Cat",
        "Ocicat", "Oriental Long Hair", "Oriental Short Hair", "Oriental Tabby",
        "Persian", "Pixiebob", "Ragamuffin", "Ragdoll", "Russian Blue",
        "Scottish Fold", "Selkirk Rex", "Siamese", "Siberian", "Silver",
        "Singapura", "Snowshoe", "Somali", "Sphynx - Hairless Cat", "Tabby",
    }
    excluded = {
        "York Chocolate", "Chinchilla", "Canadian Hairless", "Burmilla", "LaPerm",
        "Cymric", "American Wirehair", "Singapura", "Chausie", "Javanese",
        "Somali", "Oriental Long Hair", "Korat", "Selkirk Rex", "Chartreux",
        "Silver", "Domestic Long Hair", "Domestic Medium Hair",
        "Domestic Short Hair",
    }
    return breed in done or breed in excluded
def main():
    """Walk each breed folder under the originals and augment every image."""
    original_file_list.sort()
    for breed in original_file_list:
        if check_exclude_breed(breed):
            continue
        print("Data Argumentation: " + breed)
        out_dir = TS_MODI_PATH + breed
        if not os.path.exists(out_dir):
            os.mkdir(out_dir)
        src_dir = TS_ORIG_PATH + breed
        for img_file in os.listdir(src_dir):
            data_arg(img_file, out_dir + "\\", src_dir + "\\")


if __name__ == "__main__":
    main()
from __future__ import unicode_literals
from ..utils import int_or_none, parse_iso8601
from .telecinco import TelecincoIE
class MiTeleIE(TelecincoIE):
    """mitele.es extractor for .../player pages.

    Locates the player state embedded in the page, then delegates video
    parsing to ``TelecincoIE._parse_content`` and fills in the metadata.
    """
    IE_DESC = "mitele.es"
    _VALID_URL = r"https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player"
    _TESTS = [
        {
            "url": "http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player",
            "info_dict": {
                "id": "FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg",
                "ext": "mp4",
                "title": "Diario de La redacciรณn Programa 144",
                "description": "md5:07c35a7b11abb05876a6a79185b58d27",
                "series": "Diario de",
                "season": "Season 14",
                "season_number": 14,
                "episode": "Tor, la web invisible",
                "episode_number": 3,
                "thumbnail": r"re:(?i)^https?://.*\.jpg$",
                "duration": 2913,
                "age_limit": 16,
                "timestamp": 1471209401,
                "upload_date": "20160814",
            },
        },
        {
            # no explicit title
            "url": "http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player",
            "info_dict": {
                "id": "oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq",
                "ext": "mp4",
                "title": "Cuarto Milenio Temporada 6 Programa 226",
                "description": "md5:5ff132013f0cd968ffbf1f5f3538a65f",
                "series": "Cuarto Milenio",
                "season": "Season 6",
                "season_number": 6,
                "episode": "Episode 24",
                "episode_number": 24,
                "thumbnail": r"re:(?i)^https?://.*\.jpg$",
                "duration": 7313,
                "age_limit": 12,
                "timestamp": 1471209021,
                "upload_date": "20160814",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player",
            "only_matching": True,
        },
        {
            "url": "https://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144-40_1006364575251/player/",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds its player state as a JS assignment; extract the
        # JSON object from window.$REACTBASE_STATE.prePlayer_mtweb.
        pre_player = self._parse_json(
            self._search_regex(
                r"window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=\s*({.+})",
                webpage,
                "Pre Player",
            ),
            display_id,
        )["prePlayer"]
        title = pre_player["title"]
        # Format/subtitle extraction is handled by the Telecinco parser.
        video_info = self._parse_content(pre_player["video"], url)
        content = pre_player.get("content") or {}
        info = content.get("info") or {}
        video_info.update(
            {
                "title": title,
                "description": info.get("synopsis"),
                "series": content.get("title"),
                "season_number": int_or_none(info.get("season_number")),
                "episode": content.get("subtitle"),
                "episode_number": int_or_none(info.get("episode_number")),
                "duration": int_or_none(info.get("duration")),
                "age_limit": int_or_none(info.get("rating")),
                "timestamp": parse_iso8601(pre_player.get("publishedTime")),
            }
        )
        return video_info

# (A duplicated copy of this module follows; its "from __future__" import was
# dropped here because __future__ imports are only legal at the top of a file.)
from ..utils import int_or_none, parse_iso8601
from .telecinco import TelecincoIE
class MiTeleIE(TelecincoIE):
    """mitele.es extractor for .../player pages.

    Locates the player state embedded in the page, then delegates video
    parsing to ``TelecincoIE._parse_content`` and fills in the metadata.
    """
    IE_DESC = "mitele.es"
    _VALID_URL = r"https?://(?:www\.)?mitele\.es/(?:[^/]+/)+(?P<id>[^/]+)/player"
    _TESTS = [
        {
            "url": "http://www.mitele.es/programas-tv/diario-de/57b0dfb9c715da65618b4afa/player",
            "info_dict": {
                "id": "FhYW1iNTE6J6H7NkQRIEzfne6t2quqPg",
                "ext": "mp4",
                "title": "Diario de La redacciรณn Programa 144",
                "description": "md5:07c35a7b11abb05876a6a79185b58d27",
                "series": "Diario de",
                "season": "Season 14",
                "season_number": 14,
                "episode": "Tor, la web invisible",
                "episode_number": 3,
                "thumbnail": r"re:(?i)^https?://.*\.jpg$",
                "duration": 2913,
                "age_limit": 16,
                "timestamp": 1471209401,
                "upload_date": "20160814",
            },
        },
        {
            # no explicit title
            "url": "http://www.mitele.es/programas-tv/cuarto-milenio/57b0de3dc915da14058b4876/player",
            "info_dict": {
                "id": "oyNG1iNTE6TAPP-JmCjbwfwJqqMMX3Vq",
                "ext": "mp4",
                "title": "Cuarto Milenio Temporada 6 Programa 226",
                "description": "md5:5ff132013f0cd968ffbf1f5f3538a65f",
                "series": "Cuarto Milenio",
                "season": "Season 6",
                "season_number": 6,
                "episode": "Episode 24",
                "episode_number": 24,
                "thumbnail": r"re:(?i)^https?://.*\.jpg$",
                "duration": 7313,
                "age_limit": 12,
                "timestamp": 1471209021,
                "upload_date": "20160814",
            },
            "params": {
                "skip_download": True,
            },
        },
        {
            "url": "http://www.mitele.es/series-online/la-que-se-avecina/57aac5c1c915da951a8b45ed/player",
            "only_matching": True,
        },
        {
            "url": "https://www.mitele.es/programas-tv/diario-de/la-redaccion/programa-144-40_1006364575251/player/",
            "only_matching": True,
        },
    ]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # The page embeds its player state as a JS assignment; extract the
        # JSON object from window.$REACTBASE_STATE.prePlayer_mtweb.
        pre_player = self._parse_json(
            self._search_regex(
                r"window\.\$REACTBASE_STATE\.prePlayer_mtweb\s*=\s*({.+})",
                webpage,
                "Pre Player",
            ),
            display_id,
        )["prePlayer"]
        title = pre_player["title"]
        # Format/subtitle extraction is handled by the Telecinco parser.
        video_info = self._parse_content(pre_player["video"], url)
        content = pre_player.get("content") or {}
        info = content.get("info") or {}
        video_info.update(
            {
                "title": title,
                "description": info.get("synopsis"),
                "series": content.get("title"),
                "season_number": int_or_none(info.get("season_number")),
                "episode": content.get("subtitle"),
                "episode_number": int_or_none(info.get("episode_number")),
                "duration": int_or_none(info.get("duration")),
                "age_limit": int_or_none(info.get("rating")),
                "timestamp": parse_iso8601(pre_player.get("publishedTime")),
            }
        )
        return video_info
from django.shortcuts import render,redirect
from .forms import SignupForm,CreateProfile,UploadNewProject
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.views.generic.edit import CreateView
from .models import Profile, Project, Rating
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """Landing page: list every uploaded project."""
    all_projects = Project.objects.all()
    return render(request, "home.html", {"projects": all_projects})
def Signup(request):
    """Register a new user; on success redirect to the login page."""
    form = SignupForm()
    if request.method == "POST":
        form = SignupForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login')
        # Fall through with the bound form so field errors are displayed;
        # the old code re-created an empty form and silently discarded them.
    return render(request, "sign_up.html", {"form": form})
def user_login(request):
    """Authenticate a user from POSTed credentials and log them in."""
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        if not (username and password):
            messages.error(request, "Fill out all the fields")
        else:
            user = authenticate(username=username, password=password)
            if user is None:
                messages.error(request, "Username or password is Incorrect")
            else:
                login(request, user)
                return redirect('home')
    return render(request, "login.html", {})
def logout(request):
    """Log the current user out and return to the home page.

    The previous implementation called itself: the module-level name
    ``logout`` is this view (it shadows ``django.contrib.auth.logout``
    imported at the top), so the call recursed until RecursionError.
    The aliased local import resolves the real auth helper.
    """
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    return redirect('home')
class CreateProfileView(CreateView):
    """Generic create view for Profile; binds the profile to the requester."""
    model = Profile
    form_class = CreateProfile
    template_name = 'createprofile.html'

    def form_valid(self, form):
        # The form does not expose the user field; attach the logged-in
        # user before the parent class saves the instance.
        form.instance.user = self.request.user
        return super().form_valid(form)
def viewProject(request, pk):
    """Detail page for project *pk*.

    The template receives a queryset (possibly empty), mirroring the
    original behaviour; the unused ``current_user`` local was removed.
    """
    project = Project.objects.filter(id=pk)
    return render(request, 'viewproject.html', {"project": project})
def viewProfile(request, pk):
    """Profile page for user *pk* together with their uploaded projects.

    Leftover debug ``print`` calls from the previous revision were removed.
    """
    user = Profile.objects.filter(user_id=pk)
    projects = Project.objects.filter(profile_id=pk)
    return render(request, "viewprofile.html", {"user": user, "projects": projects})
def searchProject(request):
    """Search projects by title via the ``project`` GET parameter."""
    if 'project' in request.GET and request.GET['project']:
        search_term = request.GET.get('project')
        searched_projects = Project.search_by_title(search_term)
        message = f"{search_term}"
        return render(request, "search.html", {"projects": searched_projects, "message": message})
    # Bug fix: the old branch called render(request, "search") - no template
    # extension and no context - which crashed and dropped the message.
    message = "You have not searched for any project"
    return render(request, "search.html", {"message": message})
@login_required(login_url='login')
def uploadProject(request):
    """Upload a new project owned by the logged-in user."""
    form = UploadNewProject()
    if request.method == "POST":
        form = UploadNewProject(request.POST, request.FILES)
        if form.is_valid():
            project = form.save(commit=False)
            project.profile = request.user
            project.save()
            return redirect('home')
        # Keep the bound form so validation errors reach the template; the
        # old code reset it to an empty form.
    return render(request, 'uploadproject.html', {"form": form})

from django.shortcuts import render,redirect  # start of a duplicated copy of this module (dataset artifact)
from .forms import SignupForm,CreateProfile,UploadNewProject
from django.contrib.auth import authenticate, login, logout
from django.contrib import messages
from django.views.generic.edit import CreateView
from .models import Profile, Project, Rating
from django.contrib.auth.decorators import login_required
# Create your views here.
def home(request):
    """Landing page: list every uploaded project."""
    all_projects = Project.objects.all()
    return render(request, "home.html", {"projects": all_projects})
def Signup(request):
    """Register a new user; on success redirect to the login page."""
    form = SignupForm()
    if request.method == "POST":
        form = SignupForm(request.POST)
        if form.is_valid():
            form.save()
            return redirect('login')
        # Fall through with the bound form so field errors are displayed;
        # the old code re-created an empty form and silently discarded them.
    return render(request, "sign_up.html", {"form": form})
def user_login(request):
    """Authenticate a user from POSTed credentials and log them in."""
    if request.method == "POST":
        username = request.POST.get('username')
        password = request.POST.get('password')
        if not (username and password):
            messages.error(request, "Fill out all the fields")
        else:
            user = authenticate(username=username, password=password)
            if user is None:
                messages.error(request, "Username or password is Incorrect")
            else:
                login(request, user)
                return redirect('home')
    return render(request, "login.html", {})
def logout(request):
    """Log the current user out and return to the home page.

    The previous implementation called itself (the module-level name
    ``logout`` shadows ``django.contrib.auth.logout``), recursing until
    RecursionError.  The aliased local import resolves the real helper.
    """
    from django.contrib.auth import logout as auth_logout
    auth_logout(request)
    return redirect('home')
class CreateProfileView(CreateView):
    """Generic create view for Profile; binds the profile to the requester."""
    model = Profile
    form_class = CreateProfile
    template_name = 'createprofile.html'

    def form_valid(self, form):
        # The form does not expose the user field; attach the logged-in
        # user before the parent class saves the instance.
        form.instance.user = self.request.user
        return super().form_valid(form)
def viewProject(request, pk):
    """Detail page for project *pk*.

    The template receives a queryset (possibly empty), mirroring the
    original behaviour; the unused ``current_user`` local was removed.
    """
    project = Project.objects.filter(id=pk)
    return render(request, 'viewproject.html', {"project": project})
def viewProfile(request, pk):
    """Render a user's profile page together with that user's projects.

    ``pk`` is the id of the owning user: the profile is looked up by
    ``user_id`` and the projects by ``profile_id``.
    """
    # Removed leftover debug print() calls that leaked querysets to stdout.
    user = Profile.objects.filter(user_id=pk)
    projects = Project.objects.filter(profile_id=pk)
    return render(request, "viewprofile.html", {"user": user, "projects": projects})
def searchProject(request):
    """Search projects by title using the ``project`` GET parameter.

    With a non-empty search term, matching projects and the term itself are
    rendered; otherwise an explanatory message is shown.
    """
    if 'project' in request.GET and request.GET['project']:
        search_term = request.GET.get('project')
        searched_projects = Project.search_by_title(search_term)
        message = f"{search_term}"
        return render(request, "search.html", {"projects": searched_projects, "message": message})
    else:
        message = "You have not searched for any project"
        # Bug fix: the original called render(request, "search") -- a bare,
        # extensionless template name and no context -- so the empty-search
        # branch raised TemplateDoesNotExist and the message was never shown.
        return render(request, "search.html", {"message": message})
@login_required(login_url='login')
def uploadProject(request):
    """Upload a new project on behalf of the logged-in user.

    GET renders an empty UploadNewProject form; a valid POST saves the
    project with the requesting user attached and redirects home.
    """
    form=UploadNewProject()
    current_user=request.user
    if request.method =="POST":
        form=UploadNewProject(request.POST, request.FILES)
        if form.is_valid():
            # Defer the save so the uploader can be attached first.
            project=form.save(commit=False)
            # NOTE(review): 'profile' is assigned the User object directly --
            # confirm Project.profile is a FK to User rather than to Profile.
            project.profile=current_user
            project.save()
            return redirect('home')
        else:
            # Invalid submissions fall through with a fresh, unbound form,
            # which discards the validation errors.
            form=UploadNewProject()
    return render(request, 'uploadproject.html', {"form":form}) | 0.421433 | 0.095518 |
import uuid
from glance.api import policy
from glance.openstack.common import local
class RequestContext(object):
    """Stores information about the security context.
    Stores how the user accesses the system, as well as additional request
    information.
    """
    # Template for the 'user_identity' logging field built in to_dict();
    # each missing component is rendered as '-'.
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
    def __init__(self, auth_token=None, user=None, tenant=None, roles=None,
                 is_admin=False, read_only=False, show_deleted=False,
                 owner_is_tenant=True, service_catalog=None,
                 policy_enforcer=None, domain=None, user_domain=None,
                 project_domain=None):
        # Token presented with the request (may be None when unauthenticated).
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.roles = roles or []
        self.read_only = read_only
        # Backing field for the show_deleted property below.
        self._show_deleted = show_deleted
        # When True, owner correlates images by tenant; otherwise by user.
        self.owner_is_tenant = owner_is_tenant
        # Fresh UUID per context so each request can be traced in the logs.
        self.request_id = str(uuid.uuid4())
        self.service_catalog = service_catalog
        # Fall back to a default policy.Enforcer when none is injected.
        self.policy_enforcer = policy_enforcer or policy.Enforcer()
        self.is_admin = is_admin
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        if not self.is_admin:
            # Let the policy engine promote this context to admin based on
            # its attributes (exact rule is defined by the enforcer).
            self.is_admin = self.policy_enforcer.check_is_admin(self)
        if not hasattr(local.store, 'context'):
            # Publish this context to the local store only when no context
            # has been stored yet.
            self.update_store()
    def to_dict(self):
        """Serialize the context into the mapping expected by logging."""
        # NOTE(ameade): These keys are named to correspond with the default
        # format string for logging the context in openstack common
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))
        return {
            'request_id': self.request_id,
            # NOTE(bcwaldon): openstack-common logging expects 'user'
            'user': self.user,
            'user_id': self.user,
            # NOTE(bcwaldon): openstack-common logging expects 'tenant'
            'tenant': self.tenant,
            'tenant_id': self.tenant,
            'project_id': self.tenant,
            'is_admin': self.is_admin,
            'read_deleted': self.show_deleted,
            'roles': self.roles,
            'auth_token': self.auth_token,
            'service_catalog': self.service_catalog,
            'user_identity': user_idt
        }
    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from a mapping of __init__ keyword arguments.

        NOTE(review): keys that are not __init__ parameters (e.g. the
        'request_id' emitted by to_dict) would raise TypeError here, so
        this is not a strict inverse of to_dict -- confirm callers.
        """
        return cls(**values)
    def update_store(self):
        """Publish this context into the shared local store."""
        local.store.context = self
    @property
    def owner(self):
        """Return the owner to correlate with an image."""
        return self.tenant if self.owner_is_tenant else self.user
    @property
    def show_deleted(self):
        """Admins can see deleted by default"""
        if self._show_deleted or self.is_admin:
            return True
        return False | glance/context.py |
import uuid
from glance.api import policy
from glance.openstack.common import local
class RequestContext(object):
    """Stores information about the security context.
    Stores how the user accesses the system, as well as additional request
    information.
    """
    # Template for the 'user_identity' logging field built in to_dict();
    # each missing component is rendered as '-'.
    user_idt_format = '{user} {tenant} {domain} {user_domain} {p_domain}'
    def __init__(self, auth_token=None, user=None, tenant=None, roles=None,
                 is_admin=False, read_only=False, show_deleted=False,
                 owner_is_tenant=True, service_catalog=None,
                 policy_enforcer=None, domain=None, user_domain=None,
                 project_domain=None):
        # Token presented with the request (may be None when unauthenticated).
        self.auth_token = auth_token
        self.user = user
        self.tenant = tenant
        self.roles = roles or []
        self.read_only = read_only
        # Backing field for the show_deleted property below.
        self._show_deleted = show_deleted
        # When True, owner correlates images by tenant; otherwise by user.
        self.owner_is_tenant = owner_is_tenant
        # Fresh UUID per context so each request can be traced in the logs.
        self.request_id = str(uuid.uuid4())
        self.service_catalog = service_catalog
        # Fall back to a default policy.Enforcer when none is injected.
        self.policy_enforcer = policy_enforcer or policy.Enforcer()
        self.is_admin = is_admin
        self.domain = domain
        self.user_domain = user_domain
        self.project_domain = project_domain
        if not self.is_admin:
            # Let the policy engine promote this context to admin based on
            # its attributes (exact rule is defined by the enforcer).
            self.is_admin = self.policy_enforcer.check_is_admin(self)
        if not hasattr(local.store, 'context'):
            # Publish this context to the local store only when no context
            # has been stored yet.
            self.update_store()
    def to_dict(self):
        """Serialize the context into the mapping expected by logging."""
        # NOTE(ameade): These keys are named to correspond with the default
        # format string for logging the context in openstack common
        user_idt = (
            self.user_idt_format.format(user=self.user or '-',
                                        tenant=self.tenant or '-',
                                        domain=self.domain or '-',
                                        user_domain=self.user_domain or '-',
                                        p_domain=self.project_domain or '-'))
        return {
            'request_id': self.request_id,
            # NOTE(bcwaldon): openstack-common logging expects 'user'
            'user': self.user,
            'user_id': self.user,
            # NOTE(bcwaldon): openstack-common logging expects 'tenant'
            'tenant': self.tenant,
            'tenant_id': self.tenant,
            'project_id': self.tenant,
            'is_admin': self.is_admin,
            'read_deleted': self.show_deleted,
            'roles': self.roles,
            'auth_token': self.auth_token,
            'service_catalog': self.service_catalog,
            'user_identity': user_idt
        }
    @classmethod
    def from_dict(cls, values):
        """Rebuild a context from a mapping of __init__ keyword arguments.

        NOTE(review): keys that are not __init__ parameters (e.g. the
        'request_id' emitted by to_dict) would raise TypeError here, so
        this is not a strict inverse of to_dict -- confirm callers.
        """
        return cls(**values)
    def update_store(self):
        """Publish this context into the shared local store."""
        local.store.context = self
    @property
    def owner(self):
        """Return the owner to correlate with an image."""
        return self.tenant if self.owner_is_tenant else self.user
    @property
    def show_deleted(self):
        """Admins can see deleted by default"""
        if self._show_deleted or self.is_admin:
            return True
        return False | 0.752649 | 0.100392 |
from django.conf import settings
from django.core.management import base
from django.utils import timezone
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from users.models import PermissionSupport
DELIMETER = '\n- '
class Command(base.BaseCommand):
    """Management command that rebuilds permissions and groups from settings."""
    help = (
        'Remove the built-in permissions and groups and populate the set '
        'from the provided permissions file.'
    )
    def add_arguments(self, parser):
        # -f/--force triggers the destructive rebuild branch in handle().
        parser.add_argument(
            '-f',
            '--force',
            help='Dump all existing groups and permissions and recreate them.',
            action='store_true',
            default=False
        )
    def log(self, message):
        """ Write log messages to stdout in a consistent format. """
        self.stdout.write('[{date}] {message}'.format(
            date=timezone.now(),
            message=message
        ))
    def handle_permissions(self, permissions):
        """Idempotently create each (codename, description) permission.

        Every permission is attached to the PermissionSupport content type;
        already-existing codenames are skipped with a log line.
        """
        content_type = ContentType.objects.get_for_model(PermissionSupport)
        # Load the permissions from the global permissions file.
        for codename, description in permissions:
            qs = Permission.objects.filter(codename=codename)
            if not qs.exists():
                self.log(f'Creating permission: {codename}...')
                Permission.objects.create(
                    codename=codename,
                    name=description,
                    content_type=content_type
                )
            else:
                self.log(f'Permission {codename} already exists. Skipping...')
    def handle_groups(self, groups):
        """Idempotently create each group and (re)assign its permission set."""
        for name, permissions in groups:
            qs = Group.objects.filter(name=name)
            # Create Group
            if not qs.exists():
                self.log(f'Creating group: "{name}"...')
                group = Group.objects.create(name=name)
            else:
                self.log(f'Group "{name}" already exists. Skipping...')
                group = qs.first()
            # Add Permissions to group
            group_name = f'Group: {name}'
            joined_permissions = DELIMETER + DELIMETER.join(permissions)
            line = '-' * len(group_name)
            self.log(
                f'Setting permissions for group:\n\n{group_name}\n'
                f'{line}{joined_permissions}\n\n'
            )
            # set() replaces the group's previous permissions entirely;
            # codenames missing from the DB are silently dropped here.
            group.permissions.set(
                Permission.objects.filter(codename__in=permissions)
            )
    def handle(self, verbosity, force=False, *args, **kwargs):
        """Entry point: with --force, wipe all permissions/groups first."""
        if force:
            self.log('Dropping existing groups and permissions...')
            # NOTE(review): this deletes EVERY Permission in the database,
            # including Django's model defaults -- confirm that is intended.
            Permission.objects.all().delete()
            Group.objects.all().delete()
        self.handle_permissions(settings.DEFAULT_PERMISSIONS)
        self.handle_groups(settings.DEFAULT_GROUPS)
        self.log('Done.') | api/users/management/commands/rebuild_permissions.py | from django.conf import settings
from django.core.management import base
from django.utils import timezone
from django.contrib.auth.models import Permission, Group
from django.contrib.contenttypes.models import ContentType
from users.models import PermissionSupport
DELIMETER = '\n- '
class Command(base.BaseCommand):
    """Management command that rebuilds permissions and groups from settings."""
    help = (
        'Remove the built-in permissions and groups and populate the set '
        'from the provided permissions file.'
    )
    def add_arguments(self, parser):
        # -f/--force triggers the destructive rebuild branch in handle().
        parser.add_argument(
            '-f',
            '--force',
            help='Dump all existing groups and permissions and recreate them.',
            action='store_true',
            default=False
        )
    def log(self, message):
        """ Write log messages to stdout in a consistent format. """
        self.stdout.write('[{date}] {message}'.format(
            date=timezone.now(),
            message=message
        ))
    def handle_permissions(self, permissions):
        """Idempotently create each (codename, description) permission.

        Every permission is attached to the PermissionSupport content type;
        already-existing codenames are skipped with a log line.
        """
        content_type = ContentType.objects.get_for_model(PermissionSupport)
        # Load the permissions from the global permissions file.
        for codename, description in permissions:
            qs = Permission.objects.filter(codename=codename)
            if not qs.exists():
                self.log(f'Creating permission: {codename}...')
                Permission.objects.create(
                    codename=codename,
                    name=description,
                    content_type=content_type
                )
            else:
                self.log(f'Permission {codename} already exists. Skipping...')
    def handle_groups(self, groups):
        """Idempotently create each group and (re)assign its permission set."""
        for name, permissions in groups:
            qs = Group.objects.filter(name=name)
            # Create Group
            if not qs.exists():
                self.log(f'Creating group: "{name}"...')
                group = Group.objects.create(name=name)
            else:
                self.log(f'Group "{name}" already exists. Skipping...')
                group = qs.first()
            # Add Permissions to group
            group_name = f'Group: {name}'
            joined_permissions = DELIMETER + DELIMETER.join(permissions)
            line = '-' * len(group_name)
            self.log(
                f'Setting permissions for group:\n\n{group_name}\n'
                f'{line}{joined_permissions}\n\n'
            )
            # set() replaces the group's previous permissions entirely;
            # codenames missing from the DB are silently dropped here.
            group.permissions.set(
                Permission.objects.filter(codename__in=permissions)
            )
    def handle(self, verbosity, force=False, *args, **kwargs):
        """Entry point: with --force, wipe all permissions/groups first."""
        if force:
            self.log('Dropping existing groups and permissions...')
            # NOTE(review): this deletes EVERY Permission in the database,
            # including Django's model defaults -- confirm that is intended.
            Permission.objects.all().delete()
            Group.objects.all().delete()
        self.handle_permissions(settings.DEFAULT_PERMISSIONS)
        self.handle_groups(settings.DEFAULT_GROUPS)
        self.log('Done.') | 0.551574 | 0.101278 |
import numpy as np
import daproli as dp
import unittest
class TransformerTest(unittest.TestCase):
    """Checks each daproli transformer object against its functional twin."""
    def test_Mapper(self):
        # Mapper(func).transform must match dp.map(func, ...).
        data = range(100)
        func = lambda x : x**2
        res1 = dp.map(func, data)
        res2 = dp.Mapper(func).transform(data)
        self.assertEqual(res1, res2)
    def test_Filter(self):
        # Filter(pred).transform must match dp.filter(pred, ...).
        data = range(100)
        pred = lambda x: x % 2 == 0
        res1 = dp.filter(pred, data)
        res2 = dp.Filter(pred).transform(data)
        self.assertEqual(res1, res2)
    def test_Splitter(self):
        # Splitter partitions by predicate, like dp.split.
        data = range(100)
        func = lambda x: x % 2 == 0
        res1, res2 = dp.split(func, data)
        res3, res4 = dp.Splitter(func).transform(data)
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Expander(self):
        # Expander fans one item out into several results, like dp.expand.
        data = range(100)
        func = lambda x : [x, x**2]
        res1, res2 = dp.expand(func, data)
        res3, res4 = dp.Expander(func).transform(data)
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Combiner(self):
        # Combiner merges two collections element-wise, like dp.combine.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func = lambda x1, x2: (x1, x2)
        res1 = dp.combine(func, data1, data2)
        res2 = dp.Combiner(func).transform([data1, data2])
        self.assertEqual(res1, res2)
    def test_Joiner(self):
        # Joiner pairs items satisfying the join predicate, like dp.join.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func = lambda x, y: y-x == 1
        res1 = dp.join(func, data1, data2)
        res2 = dp.Joiner(func).transform([data1, data2])
        self.assertEqual(res1, res2)
    def test_Manipulator(self):
        # A shuffled permutation of 0..99 must come back sorted; with
        # void=True the input is expected to pass through unchanged.
        data = np.random.choice(np.arange(100), 100, replace=False)
        res = dp.Manipulator(sorted).transform(data)
        self.assertEqual([i for i in range(100)], res)
        res = dp.Manipulator(sorted, void=True).transform(data)
        self.assertEqual(data.tolist(), res.tolist())
    def test_Window(self):
        # Window(size, step).transform must match dp.windowed.
        data = range(100)
        res1 = dp.windowed(data, 2, step=2)
        res2 = dp.Window(2, step=2).transform(data)
        self.assertEqual(res1, res2)
    def test_Flat(self):
        # Flat flattens one nesting level, like dp.flatten.
        data = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
        res1 = dp.flatten(data)
        res2 = dp.Flat().transform(data)
        self.assertEqual(res1, res2)
    def test_Union(self):
        # Union applies one transformer per input collection.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func1 = lambda x : x**2
        func2 = lambda x : x**3
        res1, res2 = dp.map(func1, data1), dp.map(func2, data2)
        res3, res4 = dp.Union(
            dp.Mapper(func1),
            dp.Mapper(func2),
        ).transform([data1, data2])
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Pipeline(self):
        # Two chained filters should leave only the primes below 10.
        data = range(10)
        res = dp.Pipeline(
            dp.Filter(lambda x : x > 1),
            dp.Filter(lambda x : all(x % idx != 0 for idx in range(2, x))),
        ).transform(data)
        self.assertEqual([2, 3, 5, 7], res) | daproli/tests/test_transformer.py | import numpy as np
import daproli as dp
import unittest
class TransformerTest(unittest.TestCase):
    """Checks each daproli transformer object against its functional twin."""
    def test_Mapper(self):
        # Mapper(func).transform must match dp.map(func, ...).
        data = range(100)
        func = lambda x : x**2
        res1 = dp.map(func, data)
        res2 = dp.Mapper(func).transform(data)
        self.assertEqual(res1, res2)
    def test_Filter(self):
        # Filter(pred).transform must match dp.filter(pred, ...).
        data = range(100)
        pred = lambda x: x % 2 == 0
        res1 = dp.filter(pred, data)
        res2 = dp.Filter(pred).transform(data)
        self.assertEqual(res1, res2)
    def test_Splitter(self):
        # Splitter partitions by predicate, like dp.split.
        data = range(100)
        func = lambda x: x % 2 == 0
        res1, res2 = dp.split(func, data)
        res3, res4 = dp.Splitter(func).transform(data)
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Expander(self):
        # Expander fans one item out into several results, like dp.expand.
        data = range(100)
        func = lambda x : [x, x**2]
        res1, res2 = dp.expand(func, data)
        res3, res4 = dp.Expander(func).transform(data)
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Combiner(self):
        # Combiner merges two collections element-wise, like dp.combine.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func = lambda x1, x2: (x1, x2)
        res1 = dp.combine(func, data1, data2)
        res2 = dp.Combiner(func).transform([data1, data2])
        self.assertEqual(res1, res2)
    def test_Joiner(self):
        # Joiner pairs items satisfying the join predicate, like dp.join.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func = lambda x, y: y-x == 1
        res1 = dp.join(func, data1, data2)
        res2 = dp.Joiner(func).transform([data1, data2])
        self.assertEqual(res1, res2)
    def test_Manipulator(self):
        # A shuffled permutation of 0..99 must come back sorted; with
        # void=True the input is expected to pass through unchanged.
        data = np.random.choice(np.arange(100), 100, replace=False)
        res = dp.Manipulator(sorted).transform(data)
        self.assertEqual([i for i in range(100)], res)
        res = dp.Manipulator(sorted, void=True).transform(data)
        self.assertEqual(data.tolist(), res.tolist())
    def test_Window(self):
        # Window(size, step).transform must match dp.windowed.
        data = range(100)
        res1 = dp.windowed(data, 2, step=2)
        res2 = dp.Window(2, step=2).transform(data)
        self.assertEqual(res1, res2)
    def test_Flat(self):
        # Flat flattens one nesting level, like dp.flatten.
        data = [[0, 1], [2, 3], [4, 5], [6, 7], [8, 9]]
        res1 = dp.flatten(data)
        res2 = dp.Flat().transform(data)
        self.assertEqual(res1, res2)
    def test_Union(self):
        # Union applies one transformer per input collection.
        data1 = range(0, 100, 2)
        data2 = range(1, 100, 2)
        func1 = lambda x : x**2
        func2 = lambda x : x**3
        res1, res2 = dp.map(func1, data1), dp.map(func2, data2)
        res3, res4 = dp.Union(
            dp.Mapper(func1),
            dp.Mapper(func2),
        ).transform([data1, data2])
        self.assertEqual(res1, res3)
        self.assertEqual(res2, res4)
    def test_Pipeline(self):
        # Two chained filters should leave only the primes below 10.
        data = range(10)
        res = dp.Pipeline(
            dp.Filter(lambda x : x > 1),
            dp.Filter(lambda x : all(x % idx != 0 for idx in range(2, x))),
        ).transform(data)
        self.assertEqual([2, 3, 5, 7], res) | 0.729905 | 0.845624