Dataset schema: gt (string, 1 distinct value); context (string, 2.49k to 119k characters).
# Copyright 2014 Technische Universitaet Berlin # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. __author__ = 'mpa' from interfaces.Checker import Checker as ABCChecker import logging import re import traceback from services.DatabaseManager import DatabaseManager from model.Entities import Topology, Service, Network, SecurityGroup, Image, Flavor, Key from emm_exceptions.NotFoundException import NotFoundException from emm_exceptions.InvalidInputException import InvalidInputException from emm_exceptions.TypeErrorException import TypeErrorException from emm_exceptions.NotDefinedException import NotDefinedException from emm_exceptions.NotUniqueException import NotUniqueException from sm.so.service_orchestrator import LOG # alarms METERS = ["system.cpu.util[,idle]"] STATISTICS = ["avg"] COMPARISON_OPERATORS = ["gt", "lt", "<", ">"] # actions ADJUSTMENT_TYPES = ["ChangeInCapacity"] # security rules PROTOCOLS = ["tcp", "udp", "icmp"] # requirements PARAMETERS = ["private_ip", "public_ip"] class Checker(ABCChecker): @staticmethod def check(action=None, alarm=None, flavor=None, image=None, key=None, network=None, policy=None, security_group=None, service=None, service_instance=None, service_type=None, topology=None, unit=None): try: if action: checkAction(action) if alarm: checkAlarm(alarm) if flavor: checkFlavor(flavor) if image: checkImage(image) if key: checkKey(key) if network: checkNetwork(network) if policy: checkPolicy(policy) if security_group: checkSecurityGroupUniqueness(security_group) checkSecurityGroup(security_group) if service: checkServiceUniqueness(service) checkService(service) if service_instance: checkServiceInstance(service_instance) if service_type: checkServiceType(service_type) if topology: checkTopolgoyUniqueness(topology) checkTopology(topology) if unit: checkUnit(unit) except Exception, exc: LOG.exception(exc) raise def checkTopology(topology): LOG.info("Check topology \"%s\"." % topology.name) # check service instances LOG.debug("Check service_instances of topology %s." % topology.name) try: for service_instance in topology.service_instances: # check service instance uniqueness inside topology LOG.debug( "Check service instance's uniqueness of \"%s\" of topology \"%s\"." % ( service_instance.name, topology.name)) checkServiceInstanceUniqueness(service_instance, topology) # check service instance except requirements LOG.debug("Check service instance \"%s\" of topology \"%s\"." % (service_instance.name, topology.name)) checkServiceInstance(service_instance) # check requirements LOG.debug("Check requirements' uniqueness for service instance \"%s\" of topology \"%s\"." % ( service_instance.name, topology.name)) checkRequirementsUniqueness(service_instance.requirements) LOG.debug("Check requirements' dependencies for service instance \"%s\" of topology \"%s\"." 
% ( service_instance.name, topology.name)) checkRequirementsDependencies(service_instance.requirements, topology.service_instances) LOG.debug("Check policies' uniqueness for service instance \"%s\" of topology \"%s\"." % ( service_instance.name, topology.name)) checkPoliciesUnqiueness(service_instance.policies) except Exception, exc: exc.message = 'Topology:\"%s\"->%s' % (topology.name, exc.message) raise exc LOG.info("Topology \"%s\" is valid" % topology.name) def checkTopolgoyUniqueness(topology): db = DatabaseManager() # check names LOG.debug("Check uniqueness of name of the toplogy \"%s\"." % topology.name) for top in db.get_all(Topology): if topology.ext_name == top.ext_name and topology.id != top.id: raise NotUniqueException("Topology name \"%s\" is already used." % topology.name) def checkServiceInstance(service_instance): # check service type LOG.debug("Check service_type of service instance \"%s\"." % service_instance.name) try: if service_instance.service_type: checkServiceType(service_instance.service_type) else: raise NotDefinedException("service_type is not defined.") # check flavor LOG.debug("Check flavor of service instance \"%s\"." % service_instance.name) if service_instance.flavor: checkFlavor(service_instance.flavor) else: raise NotDefinedException("flavor is not defined.") # check image LOG.debug("Check image of service instance \"%s\"." % service_instance.name) if service_instance.image: checkImage(service_instance.image) else: raise NotDefinedException("image is not defined.") # check keypair if provided LOG.debug("Check key of service instance \"%s\"." % service_instance.name) if service_instance.key: checkKey(service_instance.key) # check size LOG.debug("Check size of service instance \"%s\"." % service_instance.name) if service_instance.size: checkSize(service_instance.size) else: raise NotDefinedException("size is not defined.") # check networks LOG.debug("Check networks of service instance \"%s\"." % service_instance.name) for network in service_instance.networks: checkNetwork(network) # check policies if service_instance.policies: LOG.debug("Check policies of service instance \"%s\"." % service_instance.name) for policy in service_instance.policies: checkPolicy(policy) LOG.debug("Check uniqueness of policies for service instance \"%s\"." % service_instance.name) checkPoliciesUnqiueness(service_instance.policies) # check requirements if service_instance.requirements: LOG.debug("Check requirements of service instance \"%s\"." % service_instance.name) for requirement in service_instance.requirements: checkRequirement(requirement) except Exception, exc: exc.message = 'ServiceInstance:\"%s\"->%s' % (service_instance.name, exc.message) raise exc def checkServiceInstanceUniqueness(service_instance, topology): for comp_service_instance in topology.service_instances: try: if service_instance.name == comp_service_instance.name and service_instance != comp_service_instance: raise NotUniqueException("name:\"%s\" is not unique." % service_instance.name) except Exception, exc: exc.message = '%s->%s' % (service_instance.name, exc.message) raise exc LOG.debug("name \"%s\" of serivce instance is unique." % service_instance.name) def checkUnit(unit): pass def checkPolicy(policy): try: if policy.name: LOG.debug("Check policy \"%s\"" % policy.name) else: raise NotDefinedException("name is not defined.") if policy.period: if isinstance(policy.period, (long, int)): if policy.period > 0: LOG.debug("period \"%s\" is valid." 
% (policy.period)) else: raise InvalidInputException( "period:\"%s\" is not valid. Value must be greater than 0." % policy.period) else: raise InvalidInputException("period:\"%s\" is not valid. It must be an integer." % policy.period) else: raise NotDefinedException("period is not defined." % policy.name) if policy.alarm: LOG.debug("Check alarm of policy \"%s\"" % policy.name) checkAlarm(policy.alarm) else: raise NotDefinedException("alarm is not defined.") if policy.action: LOG.debug("Check action of policy \"%s\"" % policy.name) checkAction(policy.action) else: raise NotDefinedException("action is not defined.") except Exception, exc: exc.message = 'Policy:\"%s\"->%s' % (policy.name, exc.message) raise exc def checkPoliciesUnqiueness(policies): LOG.debug("Check uniqueness of policies.") for policy in policies: for comp_policy in policies: if policy.name == comp_policy and policy != comp_policy: raise NotUniqueException("policy:\"%s\" is not unique." % policy.name) LOG.debug("policy \"%s\" is unique." % policy.name) def checkAlarm(alarm): try: # check that the meter is available if alarm.meter_name: if alarm.meter_name in METERS: LOG.debug("meter_name \"%s\" is available." % alarm.meter_name) else: raise NotFoundException( "meter_name:\"%s\" is not available. Available meter names:%s" % (alarm.meter_name, METERS)) else: raise NotDefinedException("meter_name:\"%s\" is not defined.") # check that the statistic is available if alarm.statistic in STATISTICS: LOG.debug("statistic \"%s\" is available." % alarm.statistic) else: raise NotFoundException( "statistic:\"%s\" is not available. Available statistics: %s" % (alarm.statistic, STATISTICS)) # check that the evaluation period is an interger greater than 0 if alarm.evaluation_periods: if isinstance(alarm.evaluation_periods, (long, int)): if alarm.evaluation_periods > 0: LOG.debug("evaluation_periods \"%s\" is valid." % alarm.evaluation_periods) else: raise InvalidInputException( "evaluation_periods:\"%s\" is not valid. It must be greater than 0." % alarm.evaluation_periods) else: raise TypeErrorException( "evaluation_periods:\"%s\" is not valid. It must be an integer." % alarm.evaluation_periods) else: raise NotDefinedException("evaluation_periods is not defined.") if alarm.threshold: if isinstance(alarm.threshold, (long, int)): LOG.debug("threshold \"%s\" is valid." % alarm.threshold) else: raise TypeErrorException("threshold:\"%s\" is not valid. It must be an integer." % alarm.threshold) else: raise NotDefinedException("threshold is not defined.") if alarm.comparison_operator: if alarm.comparison_operator in COMPARISON_OPERATORS: LOG.debug("comparison_operator \"%s\" is available." % alarm.comparison_operator) else: NotFoundException("comparison_operator:\"%s\" is not available. Available comparison operators: %s" % ( alarm.comparison_operator, COMPARISON_OPERATORS)) else: raise NotDefinedException("comparison_operator is not defined.") except Exception, exc: exc.message = 'Alarm->%s' % exc.message raise exc def checkAction(action): try: # check adjustment type if action.adjustment_type: if action.adjustment_type in ADJUSTMENT_TYPES: LOG.debug("adjustment_type \"%s\" is available." % action.adjustment_type) else: raise NotFoundException("adjustment_type:\"%s\" is not available. 
Available adjustment types: %s" % ( action.adjustment_type, ADJUSTMENT_TYPES)) else: raise NotDefinedException("adjustment_type:\"%s\" is not defined.") # check scaling adjustment if action.scaling_adjustment: if isinstance(action.scaling_adjustment, (long, int)): LOG.debug("scaling_adjusment \"%s\" is valid." % action.scaling_adjustment) else: raise TypeErrorException( "scaling_adjusment:\"%s\" is not valid. It must be an integer." % action.scaling_adjustment) else: raise NotDefinedException("scaling_adjusment is not defined.") # check cooldown if action.cooldown: if isinstance(action.cooldown, (long, int)): if action.cooldown > 0: LOG.debug("cooldown \"%s\" is valid." % action.cooldown) else: raise InvalidInputException("cooldown:\"%s\" must be greater than 0" % action.cooldown) else: raise TypeErrorException("cooldown:\"%s\" is not valid. It must be an integer." % action.cooldown) else: raise NotDefinedException("cooldown is not defined.") except Exception, exc: exc.message = 'Action->%s' % exc.message raise exc def checkImage(image): db = DatabaseManager() existing_images = db.get_all(Image) if image.name in [existing_image.name for existing_image in existing_images]: LOG.debug("image \"%s\" is available." % image) else: raise NotFoundException( "image:\"%s\" is not available. Available images: %s" % ( image, [existing_image.name for existing_image in existing_images])) def checkFlavor(flavor): db = DatabaseManager() existing_flavors = db.get_all(Flavor) if flavor.name in [existing_flavor.name for existing_flavor in existing_flavors]: LOG.debug("flavor \"%s\" is available." % flavor) else: raise NotFoundException( "flavor:\"%s\" is not available. Available flavors: %s" % ( flavor, [existing_flavor.name for existing_flavor in existing_flavors])) def checkKey(key): db = DatabaseManager() existing_keys = db.get_all(Key) if key.name in [existing_key.name for existing_key in existing_keys]: LOG.debug("key \"%s\" is available." % key) else: raise NotFoundException( "key:\"%s\" is not available. Available keys: %s" % ( key, [existing_key.name for existing_key in existing_keys])) def checkSize(size={}): if size.get('def'): if isinstance(size.get('def'), (long, int)): if size.get('def') > 0: LOG.debug("default size \"%s\" is valid." % size.get('def')) else: raise InvalidInputException("default size:\"%s\" must be bigger than 0." % size.get('def')) else: raise TypeErrorException("default size:\"%s\" must be an integer." % size.get('def')) else: raise NotDefinedException("default size is not defined.") if size.get('min'): if isinstance(size.get('min'), (long, int)): if size.get('min') > 0: LOG.debug("minimal size \"%s\" is valid." % size.get('min')) else: raise InvalidInputException( "minimal size:\"%s\" is not valid. minimal_size must be bigger than 0." % size.get('min')) else: raise TypeErrorException("minimal size:\"%s\" must be an integer." % size.get('min')) else: raise NotDefinedException("minimal size is not defined.") if size.get('max'): if isinstance(size.get('max'), (long, int)): if size.get('max') > 0: LOG.debug("maximal size \"%s\" is valid." % size.get('max')) else: raise InvalidInputException( "maximal size:\"%s\" is not valid. maximal size must be bigger than 0." % size.get('max')) else: raise TypeErrorException("maximal size:\"\%s\" must be an integer." % size.get('max')) else: raise NotDefinedException("maximal size is not defined.") if size.get('min') and size.get('max'): if size.get('min') <= size.get('max'): LOG.debug( "minimal size \"%s\" and maximal size \"%s\" are valid. 
minimal size is equal or lower than maximal size." % ( size.get('min'), size.get('max'))) else: raise InvalidInputException( "minimal size:\"%s\" and maximal size:\"%s\" are not valid. minimal size must be equal or lower than maximal size." % ( size.get('min'), size.get('max'))) def checkNetwork(network): try: db = DatabaseManager() existing_networks = db.get_all(Network) found_private_net = False found_subnet = False found_public_net = False for existing_network in existing_networks: if network.private_net == existing_network.ext_id and not found_private_net: if existing_network.public == False: LOG.debug("private_network \"%s\" is available." % network.private_net) found_private_net = True else: raise InvalidInputException( "private_network:\"%s\" is available but it is marked as public and not as private as defined." % network.private_net) for subnet in existing_network.subnets: if network.private_subnet == subnet.ext_id and not found_subnet: found_subnet = True if found_subnet: LOG.debug("private_subnet \"%s\" is available." % network.private_subnet) else: raise InvalidInputException("private_subnet:\"%s\" is not available." % network.private_subnet) if network.public_net == existing_network.ext_id and not found_public_net: if existing_network.public == True: LOG.debug("public_network \"%s\" is available." % network.public_net) found_public_net = True else: raise InvalidInputException( "network:\"%s\" is available but it is marked as private and not as public as defined." % network.public_net) if not network.private_net and not network.private_subnet and not network.public_net: LOG.debug("Networks were not defined.") elif network.private_net and network.private_subnet and network.public_net: if found_private_net and found_subnet and found_public_net: LOG.debug("All defined networks are available for network: %s" % network) if not found_private_net: raise NotFoundException("Not found private network: %s" % network) if not found_subnet: raise NotFoundException("Not found private subnet network: %s" % network) if not found_public_net: raise NotFoundException("Not found public network: %s" % network) elif network.private_net and network.private_subnet and not network.public_net: if found_private_net and found_subnet and not found_public_net: LOG.debug("All defined networks are available for network: %s" % network) if not found_private_net: raise NotFoundException("Not found private network: %s" % network) if not found_subnet: raise NotFoundException("Not found private subnet network: %s" % network) elif not network.private_net and network.public_net: raise InvalidInputException("Private net is not defined but the public.") else: raise InvalidInputException("Error while checking networks.") except Exception, exc: exc.message = 'Network:\"%s\"->%s' % (network.name, exc.message) raise exc def checkNetworksUniqueness(networks): LOG.debug("\"Check uniqueness of networks.\"") for network in networks: for comp_network in networks: if network.name == comp_network.name and network != comp_network: raise NotUniqueException("network:\"%s\" is not unique." % network.name) LOG.debug("network \"%s\" is unique." % network.name) def checkServiceType(service_type): db = DatabaseManager() services = db.get_all(Service) found = False for service in services: if service.service_type == service_type: found = True LOG.debug("service_type \"%s\" is available." % service_type) if not found: raise NotFoundException( "service_type:\"%s\" is not available. 
Available service_types:%s" % ( service_type, [service.service_type for service in services])) def checkSecurityGroup(security_group): try: LOG.debug("Check security group \"%s\"." % security_group.name) for rule in security_group.rules: checkRule(rule) except Exception, exc: exc.message = 'SecurityGroup:\"%s\"->%s' % (security_group.name, exc.message) raise exc def checkSecurityGroupUniqueness(security_group): db = DatabaseManager() existing_security_groups = db.get_all(SecurityGroup) LOG.debug("Check uniqueness of name of the security group \"%s\"." % security_group.name) for existing_security_group in existing_security_groups: if security_group.name == existing_security_group.name and security_group != existing_security_group: raise NotUniqueException("SecurityGroup:\"%s\" is already existing." % security_group.name) LOG.debug("Check rules of security group \"%s\"." % security_group.name) def checkRule(rule): try: if rule.name: LOG.debug("Check rule \"%s\"." % rule.name) else: raise NotDefinedException("name is not defined.") # check remote_ip_prefix if rule.remote_ip_prefix: a = re.compile( "^((25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)/([012345]?[1-9]|[6]?[0-4])$") if a.match(rule.remote_ip_prefix): LOG.debug("remote_ip_prefix:\"%s\" for rule %s is valid." % (rule.remote_ip_prefix, rule.name)) else: raise InvalidInputException("remote_ip_prefix:\"%s\"is not valid. Example: \"0.0.0.0/0\"." % ( rule.remote_ip_prefix)) else: raise NotDefinedException("remote_ip_prefix is not defined.") # check protocol if rule.protocol: if rule.protocol in PROTOCOLS: LOG.debug("protocol \"%s\" is available." % rule.protocol) else: raise NotFoundException( "protocol:\"%s\" is not available. Available protocols are: %s." % (rule.protocol, PROTOCOLS)) else: raise NotDefinedException("protocol is not defined.") # check defined ports port_min = 0 port_max = 65535 if rule.port_range_min and rule.port_range_max: if not isinstance(rule.port_range_min, (long, int)): try: rule.port_range_min = int(rule.port_range_min) except Exception: raise TypeErrorException( "port_range_min:\"%s\" is not valid. It must be an integer." % rule.port_range_min) if not isinstance(rule.port_range_max, (long, int)): try: rule.port_range_max = int(rule.port_range_max) except Exception: raise TypeErrorException( "port_range_max:\"%s\" is not valid. It must be an integer." % rule.port_range_max) if rule.port_range_min <= rule.port_range_max: LOG.debug( "port_range is valid (%s <= %s)." % (rule.port_range_min, rule.port_range_max)) else: raise InvalidInputException( "port_range is not valid (%s <= %s). port_range_min is bigger than port_range_max" % ( rule.port_range_min, rule.port_range_max)) if rule.port_range_min >= port_min and rule.port_range_min <= port_max: LOG.debug("port_range_min \"%s\" for rule %s is valid (%s >= %s and %s <= %s)." % ( rule.port_range_min, rule.name, rule.port_range_min, port_min, rule.port_range_min, port_max)) else: raise InvalidInputException( "port_range_min:\"%s\" is not valid (%s >= %s and %s <= %s). \"port_range_min\" is not in range." % ( rule.port_range_min, rule.port_range_min, port_min, rule.port_range_min, port_max)) if rule.port_range_max >= port_min and rule.port_range_max <= port_max: LOG.debug("port_range_max \"%s\" for rule %s is valid (%s >= %s and %s <= %s)." % ( rule.port_range_max, rule.name, rule.port_range_max, port_min, rule.port_range_max, port_max)) else: raise InvalidInputException( "port_range_max:\"%s\" is not valid (%s >= %s and %s <= %s). 
\"port_range_max\" is not in range." % ( rule.port_range_max, rule.port_range_max, port_min, rule.port_range_max, port_max)) elif rule.port_range_min and not rule.port_range_max: raise InvalidInputException( "port range is not valid. Found \"port_range_min\":%s but no \"port_range_max\"." % ( rule.port_range_min)) elif not rule.port_range_min and rule.port_range_max: raise InvalidInputException( "port_range is not valid. Found \"port_range_max\":%s but no \"port_range_min\"." % ( rule.port_range_max)) else: LOG.debug("port_range is not defined.") except Exception, exc: exc.message = 'Rule:\"%s\"->%s' % (rule.name, exc.message) raise exc def checkRuleUnqiueness(rules=[]): LOG.debug("Check uniqueness of security group rules.") for rule in rules: try: for comp_rule in rules: if rule.name == comp_rule.name and rule != comp_rule: raise InvalidInputException("rule:\"%s\" is not unique." % rule.name) LOG.debug("rule \"%s\" is unique." % rule.name) except Exception, exc: exc.message = 'Rule:\"%s\"->%s' % (rule.name, exc.message) raise exc def checkService(service): try: # check flavor LOG.debug("Check flavor of service \"%s\"." % service.service_type) if service.flavor: checkFlavor(service.flavor) else: raise NotDefinedException("flavor is not defined.") # check image LOG.debug("Check image of service \"%s\"." % service.service_type) if service.image: checkImage(service.image) else: raise NotDefinedException("image is not defined.") # check keypair if provided LOG.debug("Check key of service \"%s\"." % service.service_type) if service.key: checkKey(service.key) # check size LOG.debug("Check size of service \"%s\"." % service.service_type) if service.size: checkSize(service.size) else: raise NotDefinedException("size is not defined.") # check networks LOG.debug("Check networks of service \"%s\"." % service.service_type) for network in service.networks: checkNetwork(network) # check requirements if service.requirements: LOG.debug("Check requirements of service \"%s\"." % service.service_type) for requirement in service.requirements: checkRequirement(requirement) except Exception, exc: exc.message = 'Service:\"%s\"->%s' % (service.service_type, exc.message) raise exc def checkServiceUniqueness(service): db = DatabaseManager() # check uniqueness of service LOG.debug("Check uniqueness of name of the service %s." % service.service_type) if service.service_type: for existing_service in db.get_all(Service): if service.service_type == existing_service.service_type and service != existing_service: raise NotUniqueException( "Service:\"%s\" is already existing." % service.service_type) else: raise NotDefinedException("service_type is not defined.") def checkRequirement(requirement): try: if requirement.name: LOG.debug("Check requirement with name \"%s\"" % requirement.name) else: raise NotDefinedException("name of requirement is not defined.") if requirement.parameter: if requirement.parameter in PARAMETERS: LOG.debug("parameter \"%s\" is available" % requirement.parameter) else: raise NotFoundException( "parameter:\"%s\" is not available. Available parameters:%s" % (requirement.parameter, PARAMETERS)) else: raise NotDefinedException("parameter is not defined.") if requirement.source: LOG.debug("source \"%s\" is defined." % requirement.source) else: raise NotDefinedException("source is not defined.") if requirement.obj_name: LOG.debug("obj_name \"%s\" of requirement is defined." 
% requirement.obj_name) else: raise NotDefinedException("obj_name is not defined.") except Exception, exc: exc.message = 'Requirement:\"%s\"->%s' % (requirement.name, exc.message) raise exc def checkRequirementsDependencies(requirements=[], service_instances=[]): for requirement in requirements: try: counter = 0 LOG.debug("Check dependencies for requirement \"%s\"." % requirement.name) for service_instance in service_instances: if requirement.source == service_instance.name: LOG.debug("source \"%s\" was found." % requirement.source) if requirement.parameter == 'private_ip' or requirement.parameter == 'public_ip': LOG.debug("parameter \"%s\" is available." % requirement.parameter) for obj in service_instance.networks: if requirement.obj_name == obj.name: LOG.debug("obj_name \"%s\" was found." % requirement.obj_name) counter += 1 else: raise InvalidInputException("parameter:\"%s\" is not available." % requirement.parameter) if counter == 0: raise NotFoundException("requirement:\"%s\" was not found (\"source:%s\", \"obj_name:%s\")." % ( requirement.name, requirement.source, requirement.obj_name)) elif counter == 1: LOG.debug("requirement \"%s\" is valid." % requirement.name) else: raise InvalidInputException("is not valid. Found sources or objects several times.") except Exception, exc: exc.message = 'Requirement:\"%s\"->%s' % (requirement.name, exc.message) raise exc def checkRequirementsUniqueness(requirements=[]): LOG.debug("Check uniqueness of requirement names.") for requirement in requirements: try: LOG.debug("Check requirement uniqueness of name \"%s\"." % requirement.name) counter = 0 for comp_requirement in requirements: if requirement.name == comp_requirement.name: counter += 1 if counter == 0: raise InvalidInputException("name:\"%s\" was not found." % requirement.name) elif counter == 1: LOG.debug("name \"%s\" is unique." % requirement.name) elif counter > 1: raise InvalidInputException( "name:\"%s\" is not valid. Found several requirements with the same name. Name must be unique." % requirement.name) except Exception, exc: exc.message = 'Requirement:\"%s\"->%s' % (requirement.name, exc.message) raise exc
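A minimal usage sketch for the validator module above, assuming it runs in the same module and that a `Topology` object (`topo` below) has already been assembled from the `model.Entities` classes; only `Checker.check` and the exception-prefixing convention of the `check*` helpers come from the code itself.

```python
# Hypothetical helper: 'topo' and its construction are assumptions; only
# Checker.check(...) and the message-prefixing behaviour come from above.
def validate_topology(topo):
    """Return True if the topology passes all checks, else log and return False."""
    try:
        # Runs checkTopolgoyUniqueness() and checkTopology(), which in turn
        # validate every service instance, requirement, policy, and network.
        Checker.check(topology=topo)
        return True
    except Exception as exc:
        # Each check* helper prefixes exc.message with the failing entity,
        # e.g. 'Topology:"demo"->ServiceInstance:"db"->flavor is not defined.'
        LOG.error("Validation failed: %s" % getattr(exc, 'message', exc))
        return False
```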
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.python.eager import context
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_io
from tensorflow.python.framework import ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variable_scope
from tensorflow.python.ops import variables
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util.tf_export import tf_export

# Picked a long key value to minimize the chance of collision with user defined
# collection keys.
GLOBAL_STEP_READ_KEY = 'global_step_read_op_cache'

# TODO(drpng): remove this after legacy uses are resolved.
write_graph = graph_io.write_graph


@tf_export('train.global_step')
def global_step(sess, global_step_tensor):
  """Small helper to get the global step.

  ```python
  # Create a variable to hold the global_step.
  global_step_tensor = tf.Variable(10, trainable=False, name='global_step')
  # Create a session.
  sess = tf.Session()
  # Initialize the variable
  sess.run(global_step_tensor.initializer)
  # Get the variable value.
  print('global_step: %s' % tf.train.global_step(sess, global_step_tensor))

  global_step: 10
  ```

  Args:
    sess: A TensorFlow `Session` object.
    global_step_tensor: `Tensor` or the `name` of the operation that contains
      the global step.

  Returns:
    The global step value.
  """
  if context.executing_eagerly():
    return int(global_step_tensor.numpy())
  return int(sess.run(global_step_tensor))


@tf_export('train.get_global_step')
def get_global_step(graph=None):
  """Get the global step tensor.

  The global step tensor must be an integer variable. We first try to find it
  in the collection `GLOBAL_STEP`, or by name `global_step:0`.

  Args:
    graph: The graph to find the global step in. If missing, use default graph.

  Returns:
    The global step variable, or `None` if none was found.

  Raises:
    TypeError: If the global step tensor has a non-integer type, or if it is
      not a `Variable`.
  """
  graph = graph or ops.get_default_graph()
  global_step_tensor = None
  global_step_tensors = graph.get_collection(ops.GraphKeys.GLOBAL_STEP)
  if len(global_step_tensors) == 1:
    global_step_tensor = global_step_tensors[0]
  elif not global_step_tensors:
    try:
      global_step_tensor = graph.get_tensor_by_name('global_step:0')
    except KeyError:
      return None
  else:
    logging.error('Multiple tensors in global_step collection.')
    return None

  assert_global_step(global_step_tensor)
  return global_step_tensor


@tf_export('train.create_global_step')
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step tensor. If missing,
      use default graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step tensor is already defined.
  """
  graph = graph or ops.get_default_graph()
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  if context.executing_eagerly():
    with ops.device('cpu:0'):
      return variable_scope.get_variable(
          ops.GraphKeys.GLOBAL_STEP,
          shape=[],
          dtype=dtypes.int64,
          initializer=init_ops.zeros_initializer(),
          trainable=False,
          collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                       ops.GraphKeys.GLOBAL_STEP])
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES,
                     ops.GraphKeys.GLOBAL_STEP])


@tf_export('train.get_or_create_global_step')
def get_or_create_global_step(graph=None):
  """Returns and creates (if necessary) the global step tensor.

  Args:
    graph: The graph in which to create the global step tensor. If missing,
      use default graph.

  Returns:
    The global step tensor.
  """
  graph = graph or ops.get_default_graph()
  global_step_tensor = get_global_step(graph)
  if global_step_tensor is None:
    global_step_tensor = create_global_step(graph)
  return global_step_tensor


@tf_export('train.assert_global_step')
def assert_global_step(global_step_tensor):
  """Asserts `global_step_tensor` is a scalar int `Variable` or `Tensor`.

  Args:
    global_step_tensor: `Tensor` to test.
  """
  if not (isinstance(global_step_tensor, variables.Variable) or
          isinstance(global_step_tensor, ops.Tensor) or
          resource_variable_ops.is_resource_variable(global_step_tensor)):
    raise TypeError(
        'Existing "global_step" must be a Variable or Tensor: %s.' %
        global_step_tensor)

  if not global_step_tensor.dtype.base_dtype.is_integer:
    raise TypeError('Existing "global_step" does not have integer type: %s' %
                    global_step_tensor.dtype)

  if (global_step_tensor.get_shape().ndims != 0 and
      global_step_tensor.get_shape().is_fully_defined()):
    raise TypeError('Existing "global_step" is not scalar: %s' %
                    global_step_tensor.get_shape())


def _get_global_step_read(graph=None):
  """Gets global step read tensor in graph.

  Args:
    graph: The graph in which to create the global step read tensor. If
      missing, use default graph.

  Returns:
    Global step read tensor.

  Raises:
    RuntimeError: if multiple items found in collection GLOBAL_STEP_READ_KEY.
  """
  graph = graph or ops.get_default_graph()
  global_step_read_tensors = graph.get_collection(GLOBAL_STEP_READ_KEY)
  if len(global_step_read_tensors) > 1:
    raise RuntimeError('There are multiple items in collection {}. '
                       'There should be only one.'.format(
                           GLOBAL_STEP_READ_KEY))
  if len(global_step_read_tensors) == 1:
    return global_step_read_tensors[0]
  return None


def _get_or_create_global_step_read(graph=None):
  """Gets or creates global step read tensor in graph.

  Args:
    graph: The graph in which to create the global step read tensor. If
      missing, use default graph.

  Returns:
    Global step read tensor if there is global_step_tensor else return None.
  """
  graph = graph or ops.get_default_graph()
  global_step_read_tensor = _get_global_step_read(graph)
  if global_step_read_tensor is not None:
    return global_step_read_tensor
  global_step_tensor = get_global_step(graph)
  if global_step_tensor is None:
    return None
  # Add 'zero' so that it will create a copy of the variable as a Tensor.
  with graph.as_default() as g, g.name_scope(None):
    with g.name_scope(global_step_tensor.op.name + '/'):
      # Use initialized_value to ensure that global_step is initialized before
      # it is read here. This is needed because, for example, Estimator builds
      # every model_fn under a dependency on global_step_read_tensor.
      global_step_value = global_step_tensor.initialized_value() if isinstance(
          global_step_tensor, variables.Variable) else global_step_tensor
      global_step_read_tensor = global_step_value + 0
      ops.add_to_collection(GLOBAL_STEP_READ_KEY, global_step_read_tensor)
  return _get_global_step_read(graph)


def _increment_global_step(increment, graph=None):
  graph = graph or ops.get_default_graph()
  global_step_tensor = get_global_step(graph)
  if global_step_tensor is None:
    raise ValueError(
        'Global step tensor should be created by '
        'tf.train.get_or_create_global_step before calling increment.')
  global_step_read_tensor = _get_or_create_global_step_read(graph)
  with graph.as_default() as g, g.name_scope(None):
    with g.name_scope(global_step_tensor.op.name + '/'):
      with ops.control_dependencies([global_step_read_tensor]):
        return state_ops.assign_add(global_step_tensor, increment)
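A short graph-mode sketch of how the exported helpers above are typically combined, assuming a TensorFlow 1.x session-based program (the variable names are illustrative):

```python
import tensorflow as tf

graph = tf.Graph()
with graph.as_default():
    # Reuses an existing global step from the GLOBAL_STEP collection or
    # creates a scalar int64 'global_step' variable (see create_global_step).
    step = tf.train.get_or_create_global_step()
    increment_step = tf.assign_add(step, 1)

with tf.Session(graph=graph) as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(increment_step)
    # global_step() reads the tensor through the session and returns an int.
    print(tf.train.global_step(sess, step))  # 1
```

In eager mode the same helpers short-circuit through `context.executing_eagerly()` and read the variable's value directly instead of going through a session.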
# Copyright 2013, Big Switch Networks, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import logging from django.utils.translation import ugettext_lazy as _ from horizon import exceptions from horizon import forms from horizon.utils import validators from horizon import workflows from openstack_dashboard import api from openstack_dashboard.dashboards.project.loadbalancers import utils AVAILABLE_PROTOCOLS = ('HTTP', 'HTTPS', 'TCP') AVAILABLE_METHODS = ('ROUND_ROBIN', 'LEAST_CONNECTIONS', 'SOURCE_IP') LOG = logging.getLogger(__name__) class AddPoolAction(workflows.Action): name = forms.CharField(max_length=80, label=_("Name")) description = forms.CharField( initial="", required=False, max_length=80, label=_("Description")) # provider is optional because some LBaaS implemetation does # not support service-type extension. provider = forms.ChoiceField(label=_("Provider"), required=False) subnet_id = forms.ChoiceField(label=_("Subnet")) protocol = forms.ChoiceField(label=_("Protocol")) lb_method = forms.ChoiceField(label=_("Load Balancing Method")) admin_state_up = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) def __init__(self, request, *args, **kwargs): super(AddPoolAction, self).__init__(request, *args, **kwargs) tenant_id = request.user.tenant_id subnet_id_choices = [('', _("Select a Subnet"))] try: networks = api.neutron.network_list_for_tenant(request, tenant_id) except Exception: exceptions.handle(request, _('Unable to retrieve networks list.')) networks = [] for n in networks: for s in n['subnets']: subnet_id_choices.append((s.id, s.cidr)) self.fields['subnet_id'].choices = subnet_id_choices protocol_choices = [('', _("Select a Protocol"))] [protocol_choices.append((p, p)) for p in AVAILABLE_PROTOCOLS] self.fields['protocol'].choices = protocol_choices lb_method_choices = [('', _("Select a Method"))] [lb_method_choices.append((m, m)) for m in AVAILABLE_METHODS] self.fields['lb_method'].choices = lb_method_choices # provider choice try: if api.neutron.is_extension_supported(request, 'service-type'): provider_list = api.neutron.provider_list(request) providers = [p for p in provider_list if p['service_type'] == 'LOADBALANCER'] else: providers = None except Exception: exceptions.handle(request, _('Unable to retrieve providers list.')) providers = [] if providers: default_providers = [p for p in providers if p.get('default')] if default_providers: default_provider = default_providers[0]['name'] else: default_provider = None provider_choices = [(p['name'], p['name']) for p in providers if p['name'] != default_provider] if default_provider: provider_choices.insert( 0, (default_provider, _("%s (default)") % default_provider)) else: if providers is None: msg = _("Provider for Load Balancer is not supported") else: msg = _("No provider is available") provider_choices = [('', msg)] self.fields['provider'].widget.attrs['readonly'] = True self.fields['provider'].choices = provider_choices class Meta(object): name = _("Add New Pool") permissions = 
('openstack.services.network',) help_text = _("Create Pool for current project.\n\n" "Assign a name and description for the pool. " "Choose one subnet where all members of this " "pool must be on. " "Select the protocol and load balancing method " "for this pool. " "Admin State is UP (checked) by default.") class AddPoolStep(workflows.Step): action_class = AddPoolAction contributes = ("name", "description", "subnet_id", "provider", "protocol", "lb_method", "admin_state_up") def contribute(self, data, context): context = super(AddPoolStep, self).contribute(data, context) context['admin_state_up'] = (context['admin_state_up'] == 'True') if data: return context class AddPool(workflows.Workflow): slug = "addpool" name = _("Add Pool") finalize_button_name = _("Add") success_message = _('Added pool "%s".') failure_message = _('Unable to add pool "%s".') success_url = "horizon:project:loadbalancers:index" default_steps = (AddPoolStep,) def format_status_message(self, message): name = self.context.get('name') return message % name def handle(self, request, context): try: api.lbaas.pool_create(request, **context) return True except Exception: return False class AddVipAction(workflows.Action): name = forms.CharField(max_length=80, label=_("Name")) description = forms.CharField( initial="", required=False, max_length=80, label=_("Description")) subnet_id = forms.ChoiceField(label=_("VIP Subnet"), initial="", required=False) address = forms.IPField(label=_("Specify a free IP address " "from the selected subnet"), version=forms.IPv4, mask=False, required=False) protocol_port = forms.IntegerField( label=_("Protocol Port"), min_value=1, help_text=_("Enter an integer value " "between 1 and 65535."), validators=[validators.validate_port_range]) protocol = forms.ChoiceField(label=_("Protocol")) session_persistence = forms.ChoiceField( required=False, initial={}, label=_("Session Persistence"), widget=forms.Select(attrs={ 'class': 'switchable', 'data-slug': 'persistence' })) cookie_name = forms.CharField( initial="", required=False, max_length=80, label=_("Cookie Name"), help_text=_("Required for APP_COOKIE persistence;" " Ignored otherwise."), widget=forms.TextInput(attrs={ 'class': 'switched', 'data-switch-on': 'persistence', 'data-persistence-app_cookie': 'APP_COOKIE', })) connection_limit = forms.IntegerField( required=False, min_value=-1, label=_("Connection Limit"), help_text=_("Maximum number of connections allowed " "for the VIP or '-1' if the limit is not set")) admin_state_up = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) def __init__(self, request, *args, **kwargs): super(AddVipAction, self).__init__(request, *args, **kwargs) tenant_id = request.user.tenant_id subnet_id_choices = [('', _("Select a Subnet"))] try: networks = api.neutron.network_list_for_tenant(request, tenant_id) except Exception: exceptions.handle(request, _('Unable to retrieve networks list.')) networks = [] for n in networks: for s in n['subnets']: subnet_id_choices.append((s.id, s.cidr)) self.fields['subnet_id'].choices = subnet_id_choices protocol_choices = [('', _("Select a Protocol"))] [protocol_choices.append((p, p)) for p in AVAILABLE_PROTOCOLS] self.fields['protocol'].choices = protocol_choices session_persistence_choices = [('', _("No Session Persistence"))] for mode in ('SOURCE_IP', 'HTTP_COOKIE', 'APP_COOKIE'): session_persistence_choices.append((mode.lower(), mode)) self.fields[ 'session_persistence'].choices = session_persistence_choices def clean(self): cleaned_data = 
super(AddVipAction, self).clean() persistence = cleaned_data.get('session_persistence') if persistence: cleaned_data['session_persistence'] = persistence.upper() if (cleaned_data.get('session_persistence') == 'APP_COOKIE' and not cleaned_data.get('cookie_name')): msg = _('Cookie name is required for APP_COOKIE persistence.') self._errors['cookie_name'] = self.error_class([msg]) return cleaned_data class Meta(object): name = _("Specify VIP") permissions = ('openstack.services.network',) help_text = _("Create a VIP for this pool. " "Assign a name, description, IP address, port, " "and maximum connections allowed for the VIP. " "Choose the protocol and session persistence " "method for the VIP. " "Admin State is UP (checked) by default.") class AddVipStep(workflows.Step): action_class = AddVipAction depends_on = ("pool_id", "subnet") contributes = ("name", "description", "subnet_id", "address", "protocol_port", "protocol", "session_persistence", "cookie_name", "connection_limit", "admin_state_up") def contribute(self, data, context): context = super(AddVipStep, self).contribute(data, context) context['admin_state_up'] = (context['admin_state_up'] == 'True') return context class AddVip(workflows.Workflow): slug = "addvip" name = _("Add VIP") finalize_button_name = _("Add") success_message = _('Added VIP "%s".') failure_message = _('Unable to add VIP "%s".') success_url = "horizon:project:loadbalancers:index" default_steps = (AddVipStep,) def format_status_message(self, message): name = self.context.get('name') return message % name def handle(self, request, context): if context['subnet_id'] == '': try: pool = api.lbaas.pool_get(request, context['pool_id']) context['subnet_id'] = pool['subnet_id'] except Exception: context['subnet_id'] = None self.failure_message = _( 'Unable to retrieve the specified pool. ' 'Unable to add VIP "%s".') return False if context['session_persistence']: stype = context['session_persistence'] if stype == 'APP_COOKIE': cookie = context['cookie_name'] context['session_persistence'] = {'type': stype, 'cookie_name': cookie} else: context['session_persistence'] = {'type': stype} else: context['session_persistence'] = {} try: api.lbaas.vip_create(request, **context) return True except Exception: return False class AddMemberAction(workflows.Action): pool_id = forms.ChoiceField(label=_("Pool")) member_type = forms.ChoiceField( label=_("Member Source"), choices=[('server_list', _("Select from active instances")), ('member_address', _("Specify member IP address"))], required=False, widget=forms.Select(attrs={ 'class': 'switchable', 'data-slug': 'membertype' })) members = forms.MultipleChoiceField( label=_("Member(s)"), required=False, initial=["default"], widget=forms.SelectMultiple(attrs={ 'class': 'switched', 'data-switch-on': 'membertype', 'data-membertype-server_list': _("Member(s)"), }), help_text=_("Select members for this pool ")) address = forms.IPField(required=False, label=_("Member address"), help_text=_("Specify member IP address"), widget=forms.TextInput(attrs={ 'class': 'switched', 'data-switch-on': 'membertype', 'data-membertype-member_address': _("Member address"), }), initial="", version=forms.IPv4 | forms.IPv6, mask=False) weight = forms.IntegerField( max_value=256, min_value=1, label=_("Weight"), required=False, help_text=_("Relative part of requests this pool member serves " "compared to others. \nThe same weight will be applied to " "all the selected members and can be modified later. 
" "Weight must be in the range 1 to 256.") ) protocol_port = forms.IntegerField( label=_("Protocol Port"), min_value=1, help_text=_("Enter an integer value between 1 and 65535. " "The same port will be used for all the selected " "members and can be modified later."), validators=[validators.validate_port_range] ) admin_state_up = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) def __init__(self, request, *args, **kwargs): super(AddMemberAction, self).__init__(request, *args, **kwargs) pool_id_choices = [('', _("Select a Pool"))] try: tenant_id = self.request.user.tenant_id pools = api.lbaas.pool_list(request, tenant_id=tenant_id) except Exception: pools = [] exceptions.handle(request, _('Unable to retrieve pools list.')) pools = sorted(pools, key=lambda pool: pool.name) for p in pools: pool_id_choices.append((p.id, p.name)) self.fields['pool_id'].choices = pool_id_choices members_choices = [] try: servers, has_more = api.nova.server_list(request) except Exception: servers = [] exceptions.handle(request, _('Unable to retrieve instances list.')) if len(servers) == 0: self.fields['members'].label = _( "No servers available. To add a member, you " "need at least one running instance.") self.fields['pool_id'].required = False self.fields['protocol_port'].required = False return for m in servers: members_choices.append((m.id, m.name)) self.fields['members'].choices = sorted( members_choices, key=lambda member: member[1]) def clean(self): cleaned_data = super(AddMemberAction, self).clean() if (cleaned_data.get('member_type') == 'server_list' and not cleaned_data.get('members')): msg = _('At least one member must be specified') self._errors['members'] = self.error_class([msg]) elif (cleaned_data.get('member_type') == 'member_address' and not cleaned_data.get('address')): msg = _('Member IP address must be specified') self._errors['address'] = self.error_class([msg]) return cleaned_data class Meta(object): name = _("Add New Member") permissions = ('openstack.services.network',) help_text = _("Add member(s) to the selected pool.\n\n" "Choose one or more listed instances to be " "added to the pool as member(s). " "Assign a numeric weight and port number for the " "selected member(s) to operate(s) on; e.g., 80. \n\n" "Only one port can be associated with " "each instance.") class AddMemberStep(workflows.Step): action_class = AddMemberAction contributes = ("pool_id", "member_type", "members", "address", "protocol_port", "weight", "admin_state_up") def contribute(self, data, context): context = super(AddMemberStep, self).contribute(data, context) context['admin_state_up'] = (context['admin_state_up'] == 'True') return context class AddMember(workflows.Workflow): slug = "addmember" name = _("Add Member") finalize_button_name = _("Add") success_message = _('Added member(s).') failure_message = _('Unable to add member(s)') success_url = "horizon:project:loadbalancers:index" default_steps = (AddMemberStep,) def handle(self, request, context): if context['member_type'] == 'server_list': try: pool = api.lbaas.pool_get(request, context['pool_id']) subnet_id = pool['subnet_id'] except Exception: self.failure_message = _('Unable to retrieve ' 'the specified pool.') return False for m in context['members']: params = {'device_id': m} try: plist = api.neutron.port_list(request, **params) except Exception: return False # Sort port list for each member. This is needed to avoid # attachment of random ports in case of creation of several # members attached to several networks. 
plist = sorted(plist, key=lambda port: port.network_id) psubnet = [p for p in plist for ips in p.fixed_ips if ips['subnet_id'] == subnet_id] # If possible, select a port on pool subnet. if psubnet: selected_port = psubnet[0] elif plist: selected_port = plist[0] else: selected_port = None if selected_port: context['address'] = \ selected_port.fixed_ips[0]['ip_address'] try: api.lbaas.member_create(request, **context).id except Exception as e: msg = self.failure_message LOG.info('%s: %s' % (msg, e)) return False return True else: try: context['member_id'] = api.lbaas.member_create( request, **context).id return True except Exception as e: msg = self.failure_message LOG.info('%s: %s' % (msg, e)) return False class AddMonitorAction(workflows.Action): type = forms.ChoiceField( label=_("Type"), choices=[('ping', _('PING')), ('tcp', _('TCP')), ('http', _('HTTP')), ('https', _('HTTPS'))], widget=forms.Select(attrs={ 'class': 'switchable', 'data-slug': 'type' })) delay = forms.IntegerField( min_value=1, label=_("Delay"), help_text=_("The minimum time in seconds between regular checks " "of a member")) timeout = forms.IntegerField( min_value=1, label=_("Timeout"), help_text=_("The maximum time in seconds for a monitor to wait " "for a reply")) max_retries = forms.IntegerField( max_value=10, min_value=1, label=_("Max Retries (1~10)"), help_text=_("Number of permissible failures before changing " "the status of member to inactive")) http_method = forms.ChoiceField( initial="GET", required=False, choices=[('GET', _('GET'))], label=_("HTTP Method"), help_text=_("HTTP method used to check health status of a member"), widget=forms.Select(attrs={ 'class': 'switched', 'data-switch-on': 'type', 'data-type-http': _('HTTP Method'), 'data-type-https': _('HTTP Method') })) url_path = forms.CharField( initial="/", required=False, max_length=80, label=_("URL"), widget=forms.TextInput(attrs={ 'class': 'switched', 'data-switch-on': 'type', 'data-type-http': _('URL'), 'data-type-https': _('URL') })) expected_codes = forms.RegexField( initial="200", required=False, max_length=80, regex=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$', label=_("Expected HTTP Status Codes"), help_text=_("Expected code may be a single value (e.g. 200), " "a list of values (e.g. 200, 202), " "or range of values (e.g. 200-204)"), widget=forms.TextInput(attrs={ 'class': 'switched', 'data-switch-on': 'type', 'data-type-http': _('Expected HTTP Status Codes'), 'data-type-https': _('Expected HTTP Status Codes') })) admin_state_up = forms.ChoiceField(choices=[(True, _('UP')), (False, _('DOWN'))], label=_("Admin State")) def __init__(self, request, *args, **kwargs): super(AddMonitorAction, self).__init__(request, *args, **kwargs) def clean(self): cleaned_data = super(AddMonitorAction, self).clean() type_opt = cleaned_data.get('type') delay = cleaned_data.get('delay') timeout = cleaned_data.get('timeout') if not delay >= timeout: msg = _('Delay must be greater than or equal to Timeout') self._errors['delay'] = self.error_class([msg]) if type_opt in ['http', 'https']: http_method_opt = cleaned_data.get('http_method') url_path = cleaned_data.get('url_path') expected_codes = cleaned_data.get('expected_codes') if not http_method_opt: msg = _('Please choose a HTTP method') self._errors['http_method'] = self.error_class([msg]) if not url_path: msg = _('Please specify an URL') self._errors['url_path'] = self.error_class([msg]) if not expected_codes: msg = _('Please enter a single value (e.g. 200), ' 'a list of values (e.g. 
200, 202), ' 'or range of values (e.g. 200-204)') self._errors['expected_codes'] = self.error_class([msg]) return cleaned_data class Meta(object): name = _("Add New Monitor") permissions = ('openstack.services.network',) help_text = _("Create a monitor template.\n\n" "Select type of monitoring. " "Specify delay, timeout, and retry limits " "required by the monitor. " "Specify method, URL path, and expected " "HTTP codes upon success.") class AddMonitorStep(workflows.Step): action_class = AddMonitorAction contributes = ("type", "delay", "timeout", "max_retries", "http_method", "url_path", "expected_codes", "admin_state_up") def contribute(self, data, context): context = super(AddMonitorStep, self).contribute(data, context) context['admin_state_up'] = (context['admin_state_up'] == 'True') if data: return context class AddMonitor(workflows.Workflow): slug = "addmonitor" name = _("Add Monitor") finalize_button_name = _("Add") success_message = _('Added monitor') failure_message = _('Unable to add monitor') success_url = "horizon:project:loadbalancers:index" default_steps = (AddMonitorStep,) def handle(self, request, context): try: context['monitor_id'] = api.lbaas.pool_health_monitor_create( request, **context).get('id') return True except Exception: exceptions.handle(request, _("Unable to add monitor.")) return False class AddPMAssociationAction(workflows.Action): monitor_id = forms.ChoiceField(label=_("Monitor")) def __init__(self, request, *args, **kwargs): super(AddPMAssociationAction, self).__init__(request, *args, **kwargs) def populate_monitor_id_choices(self, request, context): self.fields['monitor_id'].label = _("Select a monitor template " "for %s") % context['pool_name'] monitor_id_choices = [('', _("Select a Monitor"))] try: tenant_id = self.request.user.tenant_id monitors = api.lbaas.pool_health_monitor_list(request, tenant_id=tenant_id) pool_monitors_ids = [pm.id for pm in context['pool_monitors']] for m in monitors: if m.id not in pool_monitors_ids: display_name = utils.get_monitor_display_name(m) monitor_id_choices.append((m.id, display_name)) except Exception: exceptions.handle(request, _('Unable to retrieve monitors list.')) self.fields['monitor_id'].choices = monitor_id_choices return monitor_id_choices class Meta(object): name = _("Association Details") permissions = ('openstack.services.network',) help_text = _("Associate a health monitor with target pool.") class AddPMAssociationStep(workflows.Step): action_class = AddPMAssociationAction depends_on = ("pool_id", "pool_name", "pool_monitors") contributes = ("monitor_id",) def contribute(self, data, context): context = super(AddPMAssociationStep, self).contribute(data, context) if data: return context class AddPMAssociation(workflows.Workflow): slug = "addassociation" name = _("Associate Monitor") finalize_button_name = _("Associate") success_message = _('Associated monitor.') failure_message = _('Unable to associate monitor.') success_url = "horizon:project:loadbalancers:index" default_steps = (AddPMAssociationStep,) def handle(self, request, context): try: context['monitor_id'] = api.lbaas.pool_monitor_association_create( request, **context) return True except Exception: exceptions.handle(request, _("Unable to associate monitor.")) return False class DeletePMAssociationAction(workflows.Action): monitor_id = forms.ChoiceField(label=_("Monitor")) def __init__(self, request, *args, **kwargs): super(DeletePMAssociationAction, self).__init__( request, *args, **kwargs) def populate_monitor_id_choices(self, request, context): 
self.fields['monitor_id'].label = (_("Select a health monitor of %s") % context['pool_name']) monitor_id_choices = [('', _("Select a Monitor"))] try: monitors = api.lbaas.pool_health_monitor_list(request) pool_monitors_ids = [pm.id for pm in context['pool_monitors']] for m in monitors: if m.id in pool_monitors_ids: display_name = utils.get_monitor_display_name(m) monitor_id_choices.append((m.id, display_name)) except Exception: exceptions.handle(request, _('Unable to retrieve monitors list.')) self.fields['monitor_id'].choices = monitor_id_choices return monitor_id_choices class Meta(object): name = _("Association Details") permissions = ('openstack.services.network',) help_text = _("Disassociate a health monitor from target pool. ") class DeletePMAssociationStep(workflows.Step): action_class = DeletePMAssociationAction depends_on = ("pool_id", "pool_name", "pool_monitors") contributes = ("monitor_id",) def contribute(self, data, context): context = super(DeletePMAssociationStep, self).contribute( data, context) if data: return context class DeletePMAssociation(workflows.Workflow): slug = "deleteassociation" name = _("Disassociate Monitor") finalize_button_name = _("Disassociate") success_message = _('Disassociated monitor.') failure_message = _('Unable to disassociate monitor.') success_url = "horizon:project:loadbalancers:index" default_steps = (DeletePMAssociationStep,) def handle(self, request, context): try: context['monitor_id'] = api.lbaas.pool_monitor_association_delete( request, **context) return True except Exception: exceptions.handle(request, _("Unable to disassociate monitor.")) return False
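For context, a sketch of how a workflow such as `AddPool` is usually surfaced in a Horizon panel; the view class, module path, and docstring below are illustrative assumptions, not part of this module:

```python
# Hypothetical views.py for the loadbalancers panel.
from horizon import workflows

from openstack_dashboard.dashboards.project.loadbalancers \
    import workflows as project_workflows


class AddPoolView(workflows.WorkflowView):
    """Renders the "Add Pool" form steps and runs AddPool.handle() on submit."""
    # WorkflowView collects each Step's contributed context and passes it to
    # AddPool.handle(), which calls api.lbaas.pool_create(request, **context).
    workflow_class = project_workflows.AddPool
```

The other workflows defined above (AddVip, AddMember, AddMonitor, and the monitor association workflows) would be wired up the same way, each behind its own view and URL.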
# Copyright (C) 2016 Atsushi Togo # All rights reserved. # # This file is part of phono3py. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # * Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in # the documentation and/or other materials provided with the # distribution. # # * Neither the name of the phonopy project nor the names of its # contributors may be used to endorse or promote products derived # from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE # COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, # INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, # BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from phonopy.interface.phonopy_yaml import PhonopyYaml import numpy as np class Phono3pyYaml(PhonopyYaml): command_name = "phono3py" default_filenames = ("phono3py_disp.yaml", "phono3py.yaml") default_settings = {'force_sets': False, 'displacements': True, 'force_constants': False, 'born_effective_charge': True, 'dielectric_constant': True} def __init__(self, configuration=None, calculator=None, physical_units=None, settings=None): self.configuration = None self.calculator = None self.physical_units = None self.settings = None self.unitcell = None self.primitive = None self.supercell = None self.dataset = None self.supercell_matrix = None self.primitive_matrix = None self.nac_params = None self.force_constants = None self.symmetry = None # symmetry of supercell self.s2p_map = None self.u2p_map = None self.frequency_unit_conversion_factor = None self.version = None # # phono3py only # # With DIM_FC2 given self.phonon_supercell_matrix = None self.phonon_dataset = None self.phonon_supercell = None self.phonon_primitive = None self._yaml = None super(Phono3pyYaml, self).__init__( configuration=configuration, calculator=calculator, physical_units=physical_units, settings=settings) def set_phonon_info(self, phono3py): super(Phono3pyYaml, self).set_phonon_info(phono3py) self.phonon_supercell_matrix = phono3py.phonon_supercell_matrix self.phonon_dataset = phono3py.phonon_dataset self.phonon_primitive = phono3py.phonon_primitive self.phonon_supercell = phono3py.phonon_supercell def parse(self): super(Phono3pyYaml, self).parse() self._parse_fc3_dataset() def _parse_all_cells(self): """Parse all cells This method override PhonopyYaml._parse_all_cells. 
""" super(Phono3pyYaml, self)._parse_all_cells() if 'phonon_primitive_cell' in self._yaml: self.phonon_primitive = self._parse_cell( self._yaml['phonon_primitive_cell']) if 'phonon_supercell' in self._yaml: self.phonon_supercell = self._parse_cell( self._yaml['phonon_supercell']) if 'phonon_supercell_matrix' in self._yaml: self.phonon_supercell_matrix = np.array( self._yaml['phonon_supercell_matrix'], dtype='intc', order='C') def _parse_dataset(self): """Parse phonon_dataset This method override PhonopyYaml._parse_dataset. """ self.phonon_dataset = self._get_dataset(self.phonon_supercell) def _parse_fc3_dataset(self): """ 'duplicates' can be either dict (<v1.21) or list in phono3py.yaml. From v1.21, it was changed to list of list because dict with a key of int type is not allowed in JSON. """ dataset = None if 'displacement_pairs' in self._yaml: disp = self._yaml['displacement_pairs'][0] if type(disp) is dict: # type1 dataset = self._parse_forces_fc3_type1(len(self.supercell)) elif type(disp) is list: # type2 if 'displacement' in disp[0]: dataset = self._parse_force_sets_type2() if 'displacement_pair_info' in self._yaml: info_yaml = self._yaml['displacement_pair_info'] if 'cutoff_pair_distance' in info_yaml: dataset['cutoff_distance'] = info_yaml['cutoff_pair_distance'] if 'duplicated_supercell_ids' in info_yaml: dataset['duplicates'] = info_yaml['duplicated_supercell_ids'] self.dataset = dataset def _parse_forces_fc3_type1(self, natom): dataset = {'natom': natom, 'first_atoms': []} for d1 in self._yaml['displacement_pairs']: data1 = { 'number': d1['atom'] - 1, 'displacement': np.array(d1['displacement'], dtype='double'), 'second_atoms': []} if 'forces' in d1: data1['forces'] = np.array(d1['forces'], dtype='double', order='C') d2_list = d1.get('paired_with') if d2_list is None: # backward compatibility d2_list = d1.get('second_atoms') for d2 in d2_list: if 'forces' in d2: data1['second_atoms'].append( {'number': d2['atom'] - 1, 'displacement': np.array(d2['displacement'], dtype='double'), 'forces': np.array(d2['forces'], dtype='double', order='C'), 'id': d2['displacement_id'], 'pair_distance': d2['pair_distance']}) else: disps = [{'number': d2['atom'] - 1, 'displacement': np.array(disp, dtype='double')} for disp in d2['displacements']] if 'pair_distance' in d2: for d2_dict in disps: d2_dict['pair_distance'] = d2['pair_distance'] if 'included' in d2: for d2_dict in disps: d2_dict['included'] = d2['included'] if 'displacement_ids' in d2: for disp_id, d2_dict in zip( d2['displacement_ids'], disps): d2_dict['id'] = disp_id data1['second_atoms'] += disps dataset['first_atoms'].append(data1) return dataset def _cell_info_yaml_lines(self): """Get YAML lines for information of cells This method override PhonopyYaml._cell_info_yaml_lines. 
""" lines = super(Phono3pyYaml, self)._cell_info_yaml_lines() lines += self._supercell_matrix_yaml_lines( self.phonon_supercell_matrix, "phonon_supercell_matrix") lines += self._primitive_yaml_lines(self.phonon_primitive, "phonon_primitive_cell") lines += self._phonon_supercell_yaml_lines() return lines def _phonon_supercell_matrix_yaml_lines(self): lines = [] if self.phonon_supercell_matrix is not None: lines.append("phonon_supercell_matrix:") for v in self.supercell_matrix: lines.append("- [ %3d, %3d, %3d ]" % tuple(v)) lines.append("") return lines def _phonon_supercell_yaml_lines(self): lines = [] if self.phonon_supercell is not None: s2p_map = getattr(self.phonon_primitive, 's2p_map', None) lines += self._cell_yaml_lines( self.phonon_supercell, "phonon_supercell", s2p_map) lines.append("") return lines def _nac_yaml_lines(self): """Get YAML lines for parameters of non-analytical term correction This method override PhonopyYaml._nac_yaml_lines. """ if self.phonon_primitive is not None: return self._nac_yaml_lines_given_symbols( self.phonon_primitive.symbols) else: return self._nac_yaml_lines_given_symbols( self.primitive.symbols) def _displacements_yaml_lines(self, with_forces=False): """Get YAML lines for phonon_dataset and dataset. This method override PhonopyYaml._displacements_yaml_lines. PhonopyYaml._displacements_yaml_lines_2types is written to be also used by Phono3pyYaml. """ lines = [] if self.phonon_supercell_matrix is not None: lines += self._displacements_yaml_lines_2types( self.phonon_dataset, with_forces=with_forces) lines += self._displacements_yaml_lines_2types( self.dataset, with_forces=with_forces) return lines def _displacements_yaml_lines_type1(self, dataset, with_forces=False): """Get YAML lines for type1 phonon_dataset and dataset. This method override PhonopyYaml._displacements_yaml_lines_type1. PhonopyYaml._displacements_yaml_lines_2types calls Phono3pyYaml._displacements_yaml_lines_type1. """ id_offset = len(dataset['first_atoms']) if 'second_atoms' in dataset['first_atoms'][0]: lines = ["displacement_pairs:"] else: lines = ["displacements:"] for i, d in enumerate(dataset['first_atoms']): lines.append("- atom: %4d" % (d['number'] + 1)) lines.append(" displacement:") lines.append(" [ %19.16f, %19.16f, %19.16f ]" % tuple(d['displacement'])) id_num = i + 1 if 'id' in d: assert id_num == d['id'] lines.append(" displacement_id: %d" % id_num) if with_forces and 'forces' in d: lines.append(" forces:") for v in d['forces']: lines.append( " - [ %19.16f, %19.16f, %19.16f ]" % tuple(v)) if 'second_atoms' in d: ret_lines, id_offset = self._second_displacements_yaml_lines( d['second_atoms'], id_offset, with_forces=with_forces) lines += ret_lines lines.append("") if 'second_atoms' in dataset['first_atoms'][0]: n_single = len(dataset['first_atoms']) n_pair = 0 n_included = 0 for d1 in dataset['first_atoms']: n_d2 = len(d1['second_atoms']) n_pair += n_d2 for d2 in d1['second_atoms']: if 'included' not in d2: n_included += 1 elif d2['included']: n_included += 1 lines.append("displacement_pair_info:") if 'cutoff_distance' in dataset: lines.append(" cutoff_pair_distance: %11.8f" % dataset['cutoff_distance']) lines.append(" number_of_singles: %d" % n_single) lines.append(" number_of_pairs: %d" % n_pair) if 'cutoff_distance' in dataset: lines.append(" number_of_pairs_in_cutoff: %d" % n_included) # 'duplicates' is dict, but written as a list of list in yaml. # See the docstring of _parse_fc3_dataset for the reason. 
if 'duplicates' in dataset and dataset['duplicates']: lines.append(" duplicated_supercell_ids: " "# 0 means perfect supercell") # Backward compatibility for dict type if type(dataset['duplicates']) is dict: for i, j in dataset['duplicates'].items(): lines.append(" - [ %d, %d ]" % (int(i), j)) else: for (i, j) in dataset['duplicates']: lines.append(" - [ %d, %d ]" % (i, j)) lines.append("") return lines def _second_displacements_yaml_lines(self, dataset2, id_offset, with_forces=False): lines = [] id_num = id_offset # lines.append(" second_atoms:") lines.append(" paired_with:") numbers = np.array([d['number'] for d in dataset2]) unique_numbers = np.unique(numbers) for i in unique_numbers: indices_eq_i = np.sort(np.where(numbers == i)[0]) if with_forces and 'forces' in dataset2[indices_eq_i[0]]: for j in indices_eq_i: id_num += 1 lines.append(" - atom: %4d" % (i + 1)) lines.append(" pair_distance: %.8f" % dataset2[j]['pair_distance']) lines.append(" displacement:") lines.append(" [ %19.16f, %19.16f, %19.16f ]" % tuple(dataset2[j]['displacement'])) if 'id' in dataset2[j]: assert dataset2[j]['id'] == id_num lines.append(" displacement_id: %d" % id_num) lines.append(" forces:") for v in dataset2[j]['forces']: lines.append( " - [ %19.16f, %19.16f, %19.16f ]" % tuple(v)) else: lines.append(" - atom: %4d" % (i + 1)) lines.append(" pair_distance: %.8f" % dataset2[indices_eq_i[0]]['pair_distance']) if 'included' in dataset2[indices_eq_i[0]]: included = dataset2[indices_eq_i[0]]['included'] lines.append(" included: %s" % ("true" if included else "false")) disp_ids = [] lines.append(" displacements:") for j in indices_eq_i: id_num += 1 d = tuple(dataset2[j]['displacement']) lines.append(" - [ %19.16f, %19.16f, %19.16f ]" % d) if 'id' in dataset2[j]: assert dataset2[j]['id'] == id_num disp_ids.append(dataset2[j]['id']) if disp_ids: lines.append(" displacement_ids: [ %s ]" % ', '.join(["%d" % j for j in disp_ids])) return lines, id_num
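if __name__ == '__main__':
    # Illustrative sketch only (not part of the upstream module).  It assumes
    # the parent PhonopyYaml class exposes a ``read(filename)`` method that
    # loads the YAML file into ``self._yaml`` before ``parse`` runs; adjust
    # to the loading API of the installed phonopy version if it differs.
    p3y = Phono3pyYaml()
    p3y.read("phono3py_disp.yaml")  # one of ``default_filenames``
    # After parsing, the fc3 displacement dataset and (if present) the
    # separate fc2 ("phonon") cells are available as attributes.
    print(p3y.supercell_matrix)
    print(p3y.phonon_supercell_matrix)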
# -*- coding: utf-8 -*- # Copyright 2014 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Utility module for translating XML API objects to/from JSON objects.""" from __future__ import absolute_import import datetime import json import re import textwrap import xml.etree.ElementTree from apitools.base.py import encoding import boto from boto.gs.acl import ACL from boto.gs.acl import ALL_AUTHENTICATED_USERS from boto.gs.acl import ALL_USERS from boto.gs.acl import Entries from boto.gs.acl import Entry from boto.gs.acl import GROUP_BY_DOMAIN from boto.gs.acl import GROUP_BY_EMAIL from boto.gs.acl import GROUP_BY_ID from boto.gs.acl import USER_BY_EMAIL from boto.gs.acl import USER_BY_ID from gslib.cloud_api import ArgumentException from gslib.cloud_api import BucketNotFoundException from gslib.cloud_api import NotFoundException from gslib.cloud_api import Preconditions from gslib.exception import CommandException from gslib.third_party.storage_apitools import storage_v1_messages as apitools_messages # In Python 2.6, ElementTree raises ExpatError instead of ParseError. # pylint: disable=g-import-not-at-top try: from xml.etree.ElementTree import ParseError as XmlParseError except ImportError: from xml.parsers.expat import ExpatError as XmlParseError CACHE_CONTROL_REGEX = re.compile(r'^cache-control', re.I) CONTENT_DISPOSITION_REGEX = re.compile(r'^content-disposition', re.I) CONTENT_ENCODING_REGEX = re.compile(r'^content-encoding', re.I) CONTENT_LANGUAGE_REGEX = re.compile(r'^content-language', re.I) CONTENT_MD5_REGEX = re.compile(r'^content-md5', re.I) CONTENT_TYPE_REGEX = re.compile(r'^content-type', re.I) GOOG_API_VERSION_REGEX = re.compile(r'^x-goog-api-version', re.I) GOOG_GENERATION_MATCH_REGEX = re.compile(r'^x-goog-if-generation-match', re.I) GOOG_METAGENERATION_MATCH_REGEX = re.compile( r'^x-goog-if-metageneration-match', re.I) CUSTOM_GOOG_METADATA_REGEX = re.compile(r'^x-goog-meta-(?P<header_key>.*)', re.I) CUSTOM_AMZ_METADATA_REGEX = re.compile(r'^x-amz-meta-(?P<header_key>.*)', re.I) CUSTOM_AMZ_HEADER_REGEX = re.compile(r'^x-amz-(?P<header_key>.*)', re.I) # gsutil-specific GUIDs for marking special metadata for S3 compatibility. S3_ACL_MARKER_GUID = '3b89a6b5-b55a-4900-8c44-0b0a2f5eab43-s3-AclMarker' S3_DELETE_MARKER_GUID = 'eadeeee8-fa8c-49bb-8a7d-0362215932d8-s3-DeleteMarker' S3_MARKER_GUIDS = [S3_ACL_MARKER_GUID, S3_DELETE_MARKER_GUID] # This distinguishes S3 custom headers from S3 metadata on objects. S3_HEADER_PREFIX = 'custom-amz-header' DEFAULT_CONTENT_TYPE = 'application/octet-stream' # Because CORS is just a list in apitools, we need special handling or blank # CORS lists will get sent with other configuration commands such as lifecycle, # which would cause CORS configuration to be unintentionally removed. # Protorpc defaults list values to an empty list, and won't allow us to set the # value to None like other configuration fields, so there is no way to # distinguish the default value from when we actually want to remove the CORS # configuration. 
To work around this, we create a dummy CORS entry that # signifies that we should nullify the CORS configuration. # A value of [] means don't modify the CORS configuration. # A value of REMOVE_CORS_CONFIG means remove the CORS configuration. REMOVE_CORS_CONFIG = [apitools_messages.Bucket.CorsValueListEntry( maxAgeSeconds=-1, method=['REMOVE_CORS_CONFIG'])] # Similar to CORS above, we need a sentinel value allowing us to specify # when a default object ACL should be private (containing no entries). # A defaultObjectAcl value of [] means don't modify the default object ACL. # A value of [PRIVATE_DEFAULT_OBJ_ACL] means create an empty/private default # object ACL. PRIVATE_DEFAULT_OBJ_ACL = apitools_messages.ObjectAccessControl( id='PRIVATE_DEFAULT_OBJ_ACL') def ObjectMetadataFromHeaders(headers): """Creates object metadata according to the provided headers. gsutil -h allows specifiying various headers (originally intended to be passed to boto in gsutil v3). For the JSON API to be compatible with this option, we need to parse these headers into gsutil_api Object fields. Args: headers: Dict of headers passed via gsutil -h Raises: ArgumentException if an invalid header is encountered. Returns: apitools Object with relevant fields populated from headers. """ obj_metadata = apitools_messages.Object() for header, value in headers.items(): if CACHE_CONTROL_REGEX.match(header): obj_metadata.cacheControl = value.strip() elif CONTENT_DISPOSITION_REGEX.match(header): obj_metadata.contentDisposition = value.strip() elif CONTENT_ENCODING_REGEX.match(header): obj_metadata.contentEncoding = value.strip() elif CONTENT_MD5_REGEX.match(header): obj_metadata.md5Hash = value.strip() elif CONTENT_LANGUAGE_REGEX.match(header): obj_metadata.contentLanguage = value.strip() elif CONTENT_TYPE_REGEX.match(header): if not value: obj_metadata.contentType = DEFAULT_CONTENT_TYPE else: obj_metadata.contentType = value.strip() elif GOOG_API_VERSION_REGEX.match(header): # API version is only relevant for XML, ignore and rely on the XML API # to add the appropriate version. continue elif GOOG_GENERATION_MATCH_REGEX.match(header): # Preconditions are handled elsewhere, but allow these headers through. continue elif GOOG_METAGENERATION_MATCH_REGEX.match(header): # Preconditions are handled elsewhere, but allow these headers through. continue else: custom_goog_metadata_match = CUSTOM_GOOG_METADATA_REGEX.match(header) custom_amz_metadata_match = CUSTOM_AMZ_METADATA_REGEX.match(header) custom_amz_header_match = CUSTOM_AMZ_HEADER_REGEX.match(header) header_key = None if custom_goog_metadata_match: header_key = custom_goog_metadata_match.group('header_key') elif custom_amz_metadata_match: header_key = custom_amz_metadata_match.group('header_key') elif custom_amz_header_match: # If we got here we are guaranteed by the prior statement that this is # not an x-amz-meta- header. header_key = (S3_HEADER_PREFIX + custom_amz_header_match.group('header_key')) if header_key: if header_key.lower() == 'x-goog-content-language': # Work around content-language being inserted into custom metadata. 
continue if not obj_metadata.metadata: obj_metadata.metadata = apitools_messages.Object.MetadataValue() if not obj_metadata.metadata.additionalProperties: obj_metadata.metadata.additionalProperties = [] obj_metadata.metadata.additionalProperties.append( apitools_messages.Object.MetadataValue.AdditionalProperty( key=header_key, value=value)) else: raise ArgumentException( 'Invalid header specifed: %s:%s' % (header, value)) return obj_metadata def HeadersFromObjectMetadata(dst_obj_metadata, provider): """Creates a header dictionary based on existing object metadata. Args: dst_obj_metadata: Object metadata to create the headers from. provider: Provider string ('gs' or 's3') Returns: Headers dictionary. """ headers = {} if not dst_obj_metadata: return # Metadata values of '' mean suppress/remove this header. if dst_obj_metadata.cacheControl is not None: if not dst_obj_metadata.cacheControl: headers['cache-control'] = None else: headers['cache-control'] = dst_obj_metadata.cacheControl.strip() if dst_obj_metadata.contentDisposition: if not dst_obj_metadata.contentDisposition: headers['content-disposition'] = None else: headers['content-disposition'] = ( dst_obj_metadata.contentDisposition.strip()) if dst_obj_metadata.contentEncoding: if not dst_obj_metadata.contentEncoding: headers['content-encoding'] = None else: headers['content-encoding'] = dst_obj_metadata.contentEncoding.strip() if dst_obj_metadata.contentLanguage: if not dst_obj_metadata.contentLanguage: headers['content-language'] = None else: headers['content-language'] = dst_obj_metadata.contentLanguage.strip() if dst_obj_metadata.md5Hash: if not dst_obj_metadata.md5Hash: headers['Content-MD5'] = None else: headers['Content-MD5'] = dst_obj_metadata.md5Hash.strip() if dst_obj_metadata.contentType is not None: if not dst_obj_metadata.contentType: headers['content-type'] = None else: headers['content-type'] = dst_obj_metadata.contentType.strip() if (dst_obj_metadata.metadata and dst_obj_metadata.metadata.additionalProperties): for additional_property in dst_obj_metadata.metadata.additionalProperties: # Work around content-language being inserted into custom metadata by # the XML API. if additional_property.key == 'content-language': continue # Don't translate special metadata markers. if additional_property.key in S3_MARKER_GUIDS: continue if provider == 'gs': header_name = 'x-goog-meta-' + additional_property.key elif provider == 's3': if additional_property.key.startswith(S3_HEADER_PREFIX): header_name = ('x-amz-' + additional_property.key[len(S3_HEADER_PREFIX):]) else: header_name = 'x-amz-meta-' + additional_property.key else: raise ArgumentException('Invalid provider specified: %s' % provider) if (additional_property.value is not None and not additional_property.value): headers[header_name] = None else: headers[header_name] = additional_property.value return headers def CopyObjectMetadata(src_obj_metadata, dst_obj_metadata, override=False): """Copies metadata from src_obj_metadata to dst_obj_metadata. Args: src_obj_metadata: Metadata from source object dst_obj_metadata: Initialized metadata for destination object override: If true, will overwrite metadata in destination object. If false, only writes metadata for values that don't already exist. 
""" if override or not dst_obj_metadata.cacheControl: dst_obj_metadata.cacheControl = src_obj_metadata.cacheControl if override or not dst_obj_metadata.contentDisposition: dst_obj_metadata.contentDisposition = src_obj_metadata.contentDisposition if override or not dst_obj_metadata.contentEncoding: dst_obj_metadata.contentEncoding = src_obj_metadata.contentEncoding if override or not dst_obj_metadata.contentLanguage: dst_obj_metadata.contentLanguage = src_obj_metadata.contentLanguage if override or not dst_obj_metadata.contentType: dst_obj_metadata.contentType = src_obj_metadata.contentType if override or not dst_obj_metadata.md5Hash: dst_obj_metadata.md5Hash = src_obj_metadata.md5Hash # TODO: Apitools should ideally treat metadata like a real dictionary instead # of a list of key/value pairs (with an O(N^2) lookup). In practice the # number of values is typically small enough not to matter. # Work around this by creating our own dictionary. if (src_obj_metadata.metadata and src_obj_metadata.metadata.additionalProperties): if not dst_obj_metadata.metadata: dst_obj_metadata.metadata = apitools_messages.Object.MetadataValue() if not dst_obj_metadata.metadata.additionalProperties: dst_obj_metadata.metadata.additionalProperties = [] dst_metadata_dict = {} for dst_prop in dst_obj_metadata.metadata.additionalProperties: dst_metadata_dict[dst_prop.key] = dst_prop.value for src_prop in src_obj_metadata.metadata.additionalProperties: if src_prop.key in dst_metadata_dict: if override: # Metadata values of '' mean suppress/remove this header. if src_prop.value is not None and not src_prop.value: dst_metadata_dict[src_prop.key] = None else: dst_metadata_dict[src_prop.key] = src_prop.value else: dst_metadata_dict[src_prop.key] = src_prop.value # Rewrite the list with our updated dict. dst_obj_metadata.metadata.additionalProperties = [] for k, v in dst_metadata_dict.iteritems(): dst_obj_metadata.metadata.additionalProperties.append( apitools_messages.Object.MetadataValue.AdditionalProperty(key=k, value=v)) def PreconditionsFromHeaders(headers): """Creates bucket or object preconditions acccording to the provided headers. Args: headers: Dict of headers passed via gsutil -h Returns: gsutil Cloud API Preconditions object fields populated from headers, or None if no precondition headers are present. """ return_preconditions = Preconditions() try: for header, value in headers.items(): if GOOG_GENERATION_MATCH_REGEX.match(header): return_preconditions.gen_match = long(value) if GOOG_METAGENERATION_MATCH_REGEX.match(header): return_preconditions.meta_gen_match = long(value) except ValueError, _: raise ArgumentException('Invalid precondition header specified. ' 'x-goog-if-generation-match and ' 'x-goog-if-metageneration match must be specified ' 'with a positive integer value.') return return_preconditions def CreateNotFoundExceptionForObjectWrite( dst_provider, dst_bucket_name, src_provider=None, src_bucket_name=None, src_object_name=None, src_generation=None): """Creates a NotFoundException for an object upload or copy. This is necessary because 404s don't necessarily specify which resource does not exist. Args: dst_provider: String abbreviation of destination provider, e.g., 'gs'. dst_bucket_name: Destination bucket name for the write operation. src_provider: String abbreviation of source provider, i.e. 'gs', if any. src_bucket_name: Source bucket name, if any (for the copy case). src_object_name: Source object name, if any (for the copy case). 
src_generation: Source object generation, if any (for the copy case). Returns: NotFoundException with appropriate message. """ dst_url_string = '%s://%s' % (dst_provider, dst_bucket_name) if src_bucket_name and src_object_name: src_url_string = '%s://%s/%s' % (src_provider, src_bucket_name, src_object_name) if src_generation: src_url_string += '#%s' % str(src_generation) return NotFoundException( 'The source object %s or the destination bucket %s does not exist.' % (src_url_string, dst_url_string)) return NotFoundException( 'The destination bucket %s does not exist or the write to the ' 'destination must be restarted' % dst_url_string) def CreateBucketNotFoundException(code, provider, bucket_name): return BucketNotFoundException('%s://%s bucket does not exist.' % (provider, bucket_name), bucket_name, status=code) def CreateObjectNotFoundException(code, provider, bucket_name, object_name, generation=None): uri_string = '%s://%s/%s' % (provider, bucket_name, object_name) if generation: uri_string += '#%s' % str(generation) return NotFoundException('%s does not exist.' % uri_string, status=code) def EncodeStringAsLong(string_to_convert): """Encodes an ASCII string as a python long. This is used for modeling S3 version_id's as apitools generation. Because python longs can be arbitrarily large, this works. Args: string_to_convert: ASCII string to convert to a long. Returns: Long that represents the input string. """ return long(string_to_convert.encode('hex'), 16) def _DecodeLongAsString(long_to_convert): """Decodes an encoded python long into an ASCII string. This is used for modeling S3 version_id's as apitools generation. Args: long_to_convert: long to convert to ASCII string. If this is already a string, it is simply returned. Returns: String decoded from the input long. """ if isinstance(long_to_convert, basestring): # Already converted. return long_to_convert return hex(long_to_convert)[2:-1].decode('hex') def GenerationFromUrlAndString(url, generation): """Decodes a generation from a StorageURL and a generation string. This is used to represent gs and s3 versioning. Args: url: StorageUrl representing the object. generation: Long or string representing the object's generation or version. Returns: Valid generation string for use in URLs. """ if url.scheme == 's3' and generation: return _DecodeLongAsString(generation) return generation def CheckForXmlConfigurationAndRaise(config_type_string, json_txt): """Checks a JSON parse exception for provided XML configuration.""" try: xml.etree.ElementTree.fromstring(str(json_txt)) raise ArgumentException('\n'.join(textwrap.wrap( 'XML {0} data provided; Google Cloud Storage {0} configuration ' 'now uses JSON format. To convert your {0}, set the desired XML ' 'ACL using \'gsutil {1} set ...\' with gsutil version 3.x. Then ' 'use \'gsutil {1} get ...\' with gsutil version 4 or greater to ' 'get the corresponding JSON {0}.'.format(config_type_string, config_type_string.lower())))) except XmlParseError: pass raise ArgumentException('JSON %s data could not be loaded ' 'from: %s' % (config_type_string, json_txt)) class LifecycleTranslation(object): """Functions for converting between various lifecycle formats. This class handles conversation to and from Boto Cors objects, JSON text, and apitools Message objects. 
""" @classmethod def BotoLifecycleFromMessage(cls, lifecycle_message): """Translates an apitools message to a boto lifecycle object.""" boto_lifecycle = boto.gs.lifecycle.LifecycleConfig() if lifecycle_message: for rule_message in lifecycle_message.rule: boto_rule = boto.gs.lifecycle.Rule() if (rule_message.action and rule_message.action.type and rule_message.action.type.lower() == 'delete'): boto_rule.action = boto.gs.lifecycle.DELETE if rule_message.condition: if rule_message.condition.age: boto_rule.conditions[boto.gs.lifecycle.AGE] = ( str(rule_message.condition.age)) if rule_message.condition.createdBefore: boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE] = ( str(rule_message.condition.createdBefore)) if rule_message.condition.isLive: boto_rule.conditions[boto.gs.lifecycle.IS_LIVE] = ( str(rule_message.condition.isLive)) if rule_message.condition.numNewerVersions: boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS] = ( str(rule_message.condition.numNewerVersions)) boto_lifecycle.append(boto_rule) return boto_lifecycle @classmethod def BotoLifecycleToMessage(cls, boto_lifecycle): """Translates a boto lifecycle object to an apitools message.""" lifecycle_message = None if boto_lifecycle: lifecycle_message = apitools_messages.Bucket.LifecycleValue() for boto_rule in boto_lifecycle: lifecycle_rule = ( apitools_messages.Bucket.LifecycleValue.RuleValueListEntry()) lifecycle_rule.condition = (apitools_messages.Bucket.LifecycleValue. RuleValueListEntry.ConditionValue()) if boto_rule.action and boto_rule.action == boto.gs.lifecycle.DELETE: lifecycle_rule.action = (apitools_messages.Bucket.LifecycleValue. RuleValueListEntry.ActionValue( type='Delete')) if boto.gs.lifecycle.AGE in boto_rule.conditions: lifecycle_rule.condition.age = int( boto_rule.conditions[boto.gs.lifecycle.AGE]) if boto.gs.lifecycle.CREATED_BEFORE in boto_rule.conditions: lifecycle_rule.condition.createdBefore = ( LifecycleTranslation.TranslateBotoLifecycleTimestamp( boto_rule.conditions[boto.gs.lifecycle.CREATED_BEFORE])) if boto.gs.lifecycle.IS_LIVE in boto_rule.conditions: lifecycle_rule.condition.isLive = bool( boto_rule.conditions[boto.gs.lifecycle.IS_LIVE]) if boto.gs.lifecycle.NUM_NEWER_VERSIONS in boto_rule.conditions: lifecycle_rule.condition.numNewerVersions = int( boto_rule.conditions[boto.gs.lifecycle.NUM_NEWER_VERSIONS]) lifecycle_message.rule.append(lifecycle_rule) return lifecycle_message @classmethod def JsonLifecycleFromMessage(cls, lifecycle_message): """Translates an apitools message to lifecycle JSON.""" return str(encoding.MessageToJson(lifecycle_message)) + '\n' @classmethod def JsonLifecycleToMessage(cls, json_txt): """Translates lifecycle JSON to an apitools message.""" try: deserialized_lifecycle = json.loads(json_txt) # If lifecycle JSON is the in the following format # {'lifecycle': {'rule': ... then strip out the 'lifecycle' key # and reduce it to the following format # {'rule': ... if 'lifecycle' in deserialized_lifecycle: deserialized_lifecycle = deserialized_lifecycle['lifecycle'] lifecycle = encoding.DictToMessage( deserialized_lifecycle, apitools_messages.Bucket.LifecycleValue) return lifecycle except ValueError: CheckForXmlConfigurationAndRaise('lifecycle', json_txt) @classmethod def TranslateBotoLifecycleTimestamp(cls, lifecycle_datetime): """Parses the timestamp from the boto lifecycle into a datetime object.""" return datetime.datetime.strptime(lifecycle_datetime, '%Y-%m-%d').date() class CorsTranslation(object): """Functions for converting between various CORS formats. 
This class handles conversation to and from Boto Cors objects, JSON text, and apitools Message objects. """ @classmethod def BotoCorsFromMessage(cls, cors_message): """Translates an apitools message to a boto Cors object.""" cors = boto.gs.cors.Cors() cors.cors = [] for collection_message in cors_message: collection_elements = [] if collection_message.maxAgeSeconds: collection_elements.append((boto.gs.cors.MAXAGESEC, str(collection_message.maxAgeSeconds))) if collection_message.method: method_elements = [] for method in collection_message.method: method_elements.append((boto.gs.cors.METHOD, method)) collection_elements.append((boto.gs.cors.METHODS, method_elements)) if collection_message.origin: origin_elements = [] for origin in collection_message.origin: origin_elements.append((boto.gs.cors.ORIGIN, origin)) collection_elements.append((boto.gs.cors.ORIGINS, origin_elements)) if collection_message.responseHeader: header_elements = [] for header in collection_message.responseHeader: header_elements.append((boto.gs.cors.HEADER, header)) collection_elements.append((boto.gs.cors.HEADERS, header_elements)) cors.cors.append(collection_elements) return cors @classmethod def BotoCorsToMessage(cls, boto_cors): """Translates a boto Cors object to an apitools message.""" message_cors = [] if boto_cors.cors: for cors_collection in boto_cors.cors: if cors_collection: collection_message = apitools_messages.Bucket.CorsValueListEntry() for element_tuple in cors_collection: if element_tuple[0] == boto.gs.cors.MAXAGESEC: collection_message.maxAgeSeconds = int(element_tuple[1]) if element_tuple[0] == boto.gs.cors.METHODS: for method_tuple in element_tuple[1]: collection_message.method.append(method_tuple[1]) if element_tuple[0] == boto.gs.cors.ORIGINS: for origin_tuple in element_tuple[1]: collection_message.origin.append(origin_tuple[1]) if element_tuple[0] == boto.gs.cors.HEADERS: for header_tuple in element_tuple[1]: collection_message.responseHeader.append(header_tuple[1]) message_cors.append(collection_message) return message_cors @classmethod def JsonCorsToMessageEntries(cls, json_cors): """Translates CORS JSON to an apitools message. Args: json_cors: JSON string representing CORS configuration. Returns: List of apitools Bucket.CorsValueListEntry. An empty list represents no CORS configuration. """ try: deserialized_cors = json.loads(json_cors) cors = [] for cors_entry in deserialized_cors: cors.append(encoding.DictToMessage( cors_entry, apitools_messages.Bucket.CorsValueListEntry)) return cors except ValueError: CheckForXmlConfigurationAndRaise('CORS', json_cors) @classmethod def MessageEntriesToJson(cls, cors_message): """Translates an apitools message to CORS JSON.""" json_text = '' # Because CORS is a MessageField, serialize/deserialize as JSON list. json_text += '[' printed_one = False for cors_entry in cors_message: if printed_one: json_text += ',' else: printed_one = True json_text += encoding.MessageToJson(cors_entry) json_text += ']\n' return json_text def S3MarkerAclFromObjectMetadata(object_metadata): """Retrieves GUID-marked S3 ACL from object metadata, if present. Args: object_metadata: Object metadata to check. Returns: S3 ACL text, if present, None otherwise. """ if (object_metadata and object_metadata.metadata and object_metadata.metadata.additionalProperties): for prop in object_metadata.metadata.additionalProperties: if prop.key == S3_ACL_MARKER_GUID: return prop.value def AddS3MarkerAclToObjectMetadata(object_metadata, acl_text): """Adds a GUID-marked S3 ACL to the object metadata. 
Args: object_metadata: Object metadata to add the acl to. acl_text: S3 ACL text to add. """ if not object_metadata.metadata: object_metadata.metadata = apitools_messages.Object.MetadataValue() if not object_metadata.metadata.additionalProperties: object_metadata.metadata.additionalProperties = [] object_metadata.metadata.additionalProperties.append( apitools_messages.Object.MetadataValue.AdditionalProperty( key=S3_ACL_MARKER_GUID, value=acl_text)) class AclTranslation(object): """Functions for converting between various ACL formats. This class handles conversion to and from Boto ACL objects, JSON text, and apitools Message objects. """ JSON_TO_XML_ROLES = {'READER': 'READ', 'WRITER': 'WRITE', 'OWNER': 'FULL_CONTROL'} XML_TO_JSON_ROLES = {'READ': 'READER', 'WRITE': 'WRITER', 'FULL_CONTROL': 'OWNER'} @classmethod def BotoAclFromJson(cls, acl_json): acl = ACL() acl.parent = None acl.entries = cls.BotoEntriesFromJson(acl_json, acl) return acl @classmethod # acl_message is a list of messages, either object or bucketaccesscontrol def BotoAclFromMessage(cls, acl_message): acl_dicts = [] for message in acl_message: if message == PRIVATE_DEFAULT_OBJ_ACL: # Sentinel value indicating acl_dicts should be an empty list to create # a private (no entries) default object ACL. break acl_dicts.append(encoding.MessageToDict(message)) return cls.BotoAclFromJson(acl_dicts) @classmethod def BotoAclToJson(cls, acl): if hasattr(acl, 'entries'): return cls.BotoEntriesToJson(acl.entries) return [] @classmethod def BotoObjectAclToMessage(cls, acl): for entry in cls.BotoAclToJson(acl): message = encoding.DictToMessage(entry, apitools_messages.ObjectAccessControl) message.kind = u'storage#objectAccessControl' yield message @classmethod def BotoBucketAclToMessage(cls, acl): for entry in cls.BotoAclToJson(acl): message = encoding.DictToMessage(entry, apitools_messages.BucketAccessControl) message.kind = u'storage#bucketAccessControl' yield message @classmethod def BotoEntriesFromJson(cls, acl_json, parent): entries = Entries(parent) entries.parent = parent entries.entry_list = [cls.BotoEntryFromJson(entry_json) for entry_json in acl_json] return entries @classmethod def BotoEntriesToJson(cls, entries): return [cls.BotoEntryToJson(entry) for entry in entries.entry_list] @classmethod def BotoEntryFromJson(cls, entry_json): """Converts a JSON entry into a Boto ACL entry.""" entity = entry_json['entity'] permission = cls.JSON_TO_XML_ROLES[entry_json['role']] if entity.lower() == ALL_USERS.lower(): return Entry(type=ALL_USERS, permission=permission) elif entity.lower() == ALL_AUTHENTICATED_USERS.lower(): return Entry(type=ALL_AUTHENTICATED_USERS, permission=permission) elif entity.startswith('project'): raise CommandException('XML API does not support project scopes, ' 'cannot translate ACL.') elif 'email' in entry_json: if entity.startswith('user'): scope_type = USER_BY_EMAIL elif entity.startswith('group'): scope_type = GROUP_BY_EMAIL return Entry(type=scope_type, email_address=entry_json['email'], permission=permission) elif 'entityId' in entry_json: if entity.startswith('user'): scope_type = USER_BY_ID elif entity.startswith('group'): scope_type = GROUP_BY_ID return Entry(type=scope_type, id=entry_json['entityId'], permission=permission) elif 'domain' in entry_json: if entity.startswith('domain'): scope_type = GROUP_BY_DOMAIN return Entry(type=scope_type, domain=entry_json['domain'], permission=permission) raise CommandException('Failed to translate JSON ACL to XML.') @classmethod def BotoEntryToJson(cls, entry): 
"""Converts a Boto ACL entry to a valid JSON dictionary.""" acl_entry_json = {} # JSON API documentation uses camel case. scope_type_lower = entry.scope.type.lower() if scope_type_lower == ALL_USERS.lower(): acl_entry_json['entity'] = 'allUsers' elif scope_type_lower == ALL_AUTHENTICATED_USERS.lower(): acl_entry_json['entity'] = 'allAuthenticatedUsers' elif scope_type_lower == USER_BY_EMAIL.lower(): acl_entry_json['entity'] = 'user-%s' % entry.scope.email_address acl_entry_json['email'] = entry.scope.email_address elif scope_type_lower == USER_BY_ID.lower(): acl_entry_json['entity'] = 'user-%s' % entry.scope.id acl_entry_json['entityId'] = entry.scope.id elif scope_type_lower == GROUP_BY_EMAIL.lower(): acl_entry_json['entity'] = 'group-%s' % entry.scope.email_address acl_entry_json['email'] = entry.scope.email_address elif scope_type_lower == GROUP_BY_ID.lower(): acl_entry_json['entity'] = 'group-%s' % entry.scope.id acl_entry_json['entityId'] = entry.scope.id elif scope_type_lower == GROUP_BY_DOMAIN.lower(): acl_entry_json['entity'] = 'domain-%s' % entry.scope.domain acl_entry_json['domain'] = entry.scope.domain else: raise ArgumentException('ACL contains invalid scope type: %s' % scope_type_lower) acl_entry_json['role'] = cls.XML_TO_JSON_ROLES[entry.permission] return acl_entry_json @classmethod def JsonToMessage(cls, json_data, message_type): """Converts the input JSON data into list of Object/BucketAccessControls. Args: json_data: String of JSON to convert. message_type: Which type of access control entries to return, either ObjectAccessControl or BucketAccessControl. Raises: ArgumentException on invalid JSON data. Returns: List of ObjectAccessControl or BucketAccessControl elements. """ try: deserialized_acl = json.loads(json_data) acl = [] for acl_entry in deserialized_acl: acl.append(encoding.DictToMessage(acl_entry, message_type)) return acl except ValueError: CheckForXmlConfigurationAndRaise('ACL', json_data) @classmethod def JsonFromMessage(cls, acl): """Strips unnecessary fields from an ACL message and returns valid JSON. Args: acl: iterable ObjectAccessControl or BucketAccessControl Returns: ACL JSON string. """ serializable_acl = [] if acl is not None: for acl_entry in acl: if acl_entry.kind == u'storage#objectAccessControl': acl_entry.object = None acl_entry.generation = None acl_entry.kind = None acl_entry.bucket = None acl_entry.id = None acl_entry.selfLink = None acl_entry.etag = None serializable_acl.append(encoding.MessageToDict(acl_entry)) return json.dumps(serializable_acl, sort_keys=True, indent=2, separators=(',', ': '))
import requests
from cStringIO import StringIO
import csv
import json

DEFAULT_LIMIT = 1000
MAX_LIMIT = 50000
DEFAULT_OFFSET = None

__author__ = "Cristina Munoz <hi@xmunoz.com>"


class Socrata(object):
    def __init__(self, domain, app_token, username=None, password=None,
                 access_token=None, session_adapter=None):
        '''
        The required arguments are:
            domain: the domain you wish to access
            app_token: your Socrata application token
        Simple requests are possible without an app_token, though these
        requests will be rate-limited.

        For write/update/delete operations or private datasets, the Socrata
        API currently supports basic HTTP authentication, which requires
        these additional parameters.
            username: your Socrata username
            password: your Socrata password

        The basic HTTP authentication comes with a deprecation warning, and
        the current recommended authentication method is OAuth 2.0. To make
        requests on behalf of the user using OAuth 2.0 authentication, follow
        the recommended procedure and provide the final access_token to the
        client.

        More information about authentication can be found in the official
        docs:
            http://dev.socrata.com/docs/authentication.html
        '''
        if not domain:
            raise Exception("A domain is required.")
        self.domain = domain

        # set up the session with proper authentication credentials
        self.session = requests.Session()
        if not app_token:
            print("Warning: requests made without an app_token will be"
                  " subject to strict throttling limits.")
        else:
            self.session.headers.update({"X-App-token": app_token})

        self.authentication_validation(username, password, access_token)

        # use either basic HTTP auth or OAuth2.0
        if username and password:
            self.session.auth = (username, password)
        elif access_token:
            self.session.headers.update({"Authorization": "OAuth {0}"
                                         .format(access_token)})

        if session_adapter:
            self.session.mount(session_adapter["prefix"],
                               session_adapter["adapter"])
            self.uri_prefix = session_adapter["prefix"]
        else:
            self.uri_prefix = "https"

    def authentication_validation(self, username, password, access_token):
        '''
        Only accept one form of authentication.
        '''
        if bool(username) != bool(password):
            raise Exception("Basic authentication requires a username AND"
                            " password.")
        if (username and access_token) or (password and access_token):
            raise Exception("Cannot use both Basic Authentication and"
                            " OAuth2.0. Please use only one authentication"
                            " method.")

    def create(self, file_object):
        raise NotImplementedError()

    def get(self, resource, **kwargs):
        '''
        Read data from the requested resource. Optionally, specify a keyword
        arg to filter results:
            select : the set of columns to be returned, defaults to *
            where : filters the rows to be returned, no filter by default
            order : specifies the order of results
            group : column to group results on
            limit : max number of results to return, defaults to 1000
            offset : offset, used for paging. Defaults to 0
            q : performs a full text search for a value
            exclude_system_fields : defaults to true.
If set to false, the response will include system fields (:id, :created_at, and :updated_at) More information about the SoQL parameters can be found at the official docs: http://dev.socrata.com/docs/queries.html More information about system fields can be found here: http://dev.socrata.com/docs/system-fields.html ''' headers = _clear_empty_values({"Accept": kwargs.pop("format", None)}) params = { "$select": kwargs.pop("select", None), "$where": kwargs.pop("where", None), "$order": kwargs.pop("order", None), "$group": kwargs.pop("group", None), "$limit": kwargs.pop("limit", None), "$offset": kwargs.pop("offset", None), "$q": kwargs.pop("q", None), "$$exclude_system_fields": kwargs.pop("exclude_system_fields", None) } params.update(kwargs) params = _clear_empty_values(params) if params.get("$limit") and params["$limit"] > MAX_LIMIT: raise Exception("Max limit exceeded! {0} is greater than the" " Socrata API limit of {1}. More information on" " the official API docs:" " http://dev.socrata.com/docs/paging.html" .format(params["$limit"], MAX_LIMIT)) response = self._perform_request("get", resource, headers=headers, params=params) return response def upsert(self, resource, payload): ''' Insert, update or delete data to/from an existing dataset. Currently supports json and csv file objects. See here for the upsert documentation: http://dev.socrata.com/publishers/upsert.html ''' return self._perform_update("post", resource, payload) def replace(self, resource, payload): ''' Same logic as upsert, but overwrites existing data with the payload using PUT instead of POST. ''' return self._perform_update("put", resource, payload) def _perform_update(self, method, resource, payload): if isinstance(payload, list): response = self._perform_request(method, resource, data=json.dumps(payload)) elif isinstance(payload, file): headers = { "content-type": "text/csv", } response = self._perform_request(method, resource, data=payload, headers=headers) else: raise Exception("Unrecognized payload {0}. Currently only lists" " and files are supported.".format(type(payload))) return response def delete(self, resource, id=None): ''' Delete the entire dataset, e.g. client.delete("/resource/nimj-3ivp.json") or a single row, e.g. client.delete("/resource/nimj-3ivp.json", id=4) ''' if id: base, content_type = resource.rsplit(".", 1) delete_uri = "{0}/{1}.{2}".format(base, id, content_type) else: delete_uri = resource.replace("resource", "api/views") return self._perform_request("delete", delete_uri) def _perform_request(self, request_type, resource, **kwargs): ''' Utility method that performs all requests. ''' request_type_methods = set(["get", "post", "put", "delete"]) if request_type not in request_type_methods: raise Exception("Unknown request type. 
Supported request types are"
                            ": {0}".format(", ".join(request_type_methods)))
        uri = "{0}://{1}{2}".format(self.uri_prefix, self.domain, resource)

        # set a timeout, just to be safe
        kwargs["timeout"] = 10

        response = getattr(self.session, request_type)(uri, **kwargs)

        # handle errors
        if response.status_code not in (200, 202):
            _raise_for_status(response)

        # deletes have no content body, simply return the whole response
        if request_type == "delete":
            return response

        # for other request types, return most useful data
        content_type = response.headers.get('content-type').strip().lower()
        if content_type == "application/json; charset=utf-8":
            return response.json()
        elif content_type == "text/csv; charset=utf-8":
            csv_stream = StringIO(response.text)
            return [line for line in csv.reader(csv_stream)]
        elif content_type == "application/rdf+xml; charset=utf-8":
            return response.content
        else:
            raise Exception("Unknown response format: {0}"
                            .format(content_type))

    def close(self):
        self.session.close()


# helper methods
def _raise_for_status(response):
    '''
    Custom raise_for_status with more appropriate error message.
    '''
    http_error_msg = ""

    if 400 <= response.status_code < 500:
        http_error_msg = "{0} Client Error: {1}".format(response.status_code,
                                                        response.reason)
    elif 500 <= response.status_code < 600:
        http_error_msg = "{0} Server Error: {1}".format(response.status_code,
                                                        response.reason)

    if http_error_msg:
        try:
            more_info = response.json().get("message")
        except ValueError:
            more_info = None
        if more_info and more_info.lower() != response.reason.lower():
            http_error_msg += ".\n\t{0}".format(more_info)
        raise requests.exceptions.HTTPError(http_error_msg, response=response)


def _clear_empty_values(args):
    result = {}
    for param in args:
        if args[param] is not None:
            result[param] = args[param]
    return result
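if __name__ == '__main__':
    # Illustrative sketch only (not part of the upstream module).  The
    # domain, resource identifier, and app token are placeholders, not
    # values referenced anywhere in this client.
    client = Socrata("data.example.gov", "FAKE_APP_TOKEN")
    try:
        # SoQL keyword arguments are translated into $-prefixed parameters
        # by get(); see its docstring for the supported filters.
        rows = client.get("/resource/abcd-1234.json", limit=5,
                          where="magnitude > 3.0")
        print(rows)
    finally:
        client.close()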
""" ArcGIS python toolboxes for ``propagator``. This contains Classes compatible with ArcGIS python toolbox infrastructure. (c) Geosyntec Consultants, 2015. Released under the BSD 3-clause license (see LICENSE file for more info) Written by Paul Hobson (phobson@geosyntec.com) """ from functools import partial from textwrap import dedent import numpy import arcpy from propagator import analysis from propagator import validate from propagator import utils from propagator import base_tbx def propagate(subcatchments=None, id_col=None, ds_col=None, monitoring_locations=None, ml_filter=None, ml_filter_cols=None, value_columns=None, streams=None, output_path=None, verbose=False, asMessage=False): """ Propagate water quality scores upstream from the subcatchments of a watershed. Parameters ---------- subcatchments : str Path to the feature class containing the subcatchments. Attribute table must contain fields for the subcatchment ID and the ID of the downstream subcatchment. id_col, ds_col : str Names of the fields in the ``subcatchments`` feature class that specifies the subcatchment ID and the ID of the downstream subcatchment, respectively. monitoring_locations : str Path to the feature class containing the monitoring locations and water quality scores. value_columns : list of str List of the fields in ``monitoring_locations`` that contains water quality score that should be propagated. ml_filter : callable, optional Function used to exclude (remove) monitoring locations from from aggregation/propagation. ml_filter_cols : str, optional Name of any additional columns in ``monitoring_locations`` that are required to use ``ml_filter``. streams : str Path to the feature class containing the streams. output_path : str Path to where the the new subcatchments feature class with the propagated water quality scores should be saved. Returns ------- output_path : str Examples -------- >>> import propagator >>> from propagator import utils >>> with utils.WorkSpace('C:/gis/SOC.gdb'): ... propagator.propagate( ... subcatchments='subbasins', ... id_col='Catch_ID', ... ds_col='DS_ID', ... monitoring_locations='wq_data', ... value_columns=['Dry_Metals', 'Wet_Metals', 'Wet_TSS'], ... ml_filter=lambda row: row['StationType'] != 'Coastal', ... ml_filter_cols=['StationType'], ... streams='SOC_streams', ... output_path='propagated_metals' ... 
) See also -------- propagator.analysis.preprocess_wq propagator.analysis.mark_edges propagator.analysis.propagate_scores propagator.analysis.aggregate_streams_by_subcatchment propagator.utils.update_attribute_table """ subcatchment_output = utils.add_suffix_to_filename(output_path, 'subcatchments') stream_output = utils.add_suffix_to_filename(output_path, 'streams') wq, result_columns = analysis.preprocess_wq( monitoring_locations=monitoring_locations, ml_filter=ml_filter, ml_filter_cols=ml_filter_cols, subcatchments=subcatchments, value_columns=value_columns, id_col=id_col, ds_col=ds_col, output_path=subcatchment_output, verbose=verbose, asMessage=asMessage, msg="Aggregating water quality data in subcatchments" ) wq = analysis.mark_edges( wq, id_col=id_col, ds_col=ds_col, edge_ID='EDGE', verbose=verbose, asMessage=asMessage, msg="Marking all subcatchments that flow out of the watershed" ) for n, res_col in enumerate(result_columns, 1): wq = analysis.propagate_scores( subcatchment_array=wq, id_col=id_col, ds_col=ds_col, value_column=res_col, edge_ID='EDGE', verbose=verbose, asMessage=asMessage, msg="{} of {}: Propagating {} scores".format(n, len(result_columns), res_col) ) utils.update_attribute_table(subcatchment_output, wq, id_col, result_columns) stream_output = analysis.aggregate_streams_by_subcatchment( stream_layer=streams, subcatchment_layer=subcatchment_output, id_col=id_col, ds_col=ds_col, other_cols=result_columns, agg_method='first', output_layer=stream_output, verbose=verbose, asMessage=asMessage, msg='Aggregating and associating scores with streams.', ) return subcatchment_output, stream_output def accumulate(subcatchments_layer=None, id_col=None, ds_col=None, value_columns=None, streams_layer=None, output_layer=None, default_aggfxn='sum', ignored_value=None, verbose=False, asMessage=False): """ Accumulate upstream subcatchment properties in each stream segment. Parameters ---------- subcatchments_layer, streams_layer : str Names of the feature classes containing subcatchments and streams, respectively. id_col, ds_col : str Names of the fields in ``subcatchment_layer`` that contain the subcatchment ID and downstream subcatchment ID, respectively. sum_cols, avg_cols : list of str Names of the fields that will be accumulated by summing (e.g., number of permit violations) and area-weighted averaging (e.g., percent impervious land cover). .. note :: Do not include a column for subcatchment area in ``sum_cols``. Specify that in ``area_col`` instead. value_columns : list of str List of the fields in ``subcatchments`` that contains water quality score and watershed property that should be propagated. ``subcatchments_layer``. Falls back to computing areas on-the-fly if not provided. output_layer : str, optional Names of the new layer where the results should be saved. Returns ------- output_layer : str Names of the new layer where the results were successfully saved. 
See also -------- propagator.analysis.aggregate_streams_by_subcatchment propagator.analysis.collect_upstream_attributes propagator.utils.rec_groupby """ # Separate value columns into field name and aggregation method value_columns = validate.value_column_stats(value_columns, default_aggfxn) value_columns_aggmethods = [i[1] for i in value_columns] vc_field_wfactor = [] for col, aggmethod, wfactor in value_columns: if aggmethod.lower() == 'weighted_average': vc_field_wfactor.append([col, wfactor]) else: vc_field_wfactor.append(col) # define the Statistic objects that will be passed to `rec_groupby` statfxns = [] for agg in value_columns_aggmethods: statfxns.append(partial( utils.stats_with_ignored_values, statfxn=analysis.AGG_METHOD_DICT[agg.lower()], ignored_value=ignored_value )) res_columns = [ '{}{}'.format(prefix[:3].upper(), col) for col, prefix, _ in value_columns ] stats = [ utils.Statistic(srccol, statfxn, rescol) for srccol, statfxn, rescol in zip(vc_field_wfactor, statfxns, res_columns) ] # create a unique list of columns we need # from the subcatchment layer target_fields = [] for s in stats: if numpy.isscalar(s.srccol): target_fields.append(s.srccol) else: target_fields.extend(s.srccol) target_fields = numpy.unique(target_fields) # split the stream at the subcatchment boundaries and then # aggregate all of the stream w/i each subcatchment # into single geometries/records. split_streams_layer = analysis.aggregate_streams_by_subcatchment( stream_layer=streams_layer, subcatchment_layer=subcatchments_layer, id_col=id_col, ds_col=ds_col, other_cols=target_fields, output_layer=output_layer, agg_method="first", # first works b/c all values are equal ) # Add target_field columns back to spilt_stream_layer. final_fields = [s.rescol for s in stats] for field in final_fields: utils.add_field_with_value( table=split_streams_layer, field_name=field, field_value=None, field_type='DOUBLE', ) # load the split/aggregated streams' attribute table split_streams_table = utils.load_attribute_table( split_streams_layer, id_col, ds_col, *final_fields ) # load the subcatchment attribute table subcatchments_table = utils.load_attribute_table( subcatchments_layer, id_col, ds_col, *target_fields ) upstream_attributes = analysis.collect_upstream_attributes( subcatchments_table=subcatchments_table, target_subcatchments=split_streams_table, id_col=id_col, ds_col=ds_col, preserved_fields=target_fields ) aggregated_properties = utils.rec_groupby(upstream_attributes, id_col, *stats) # Update output layer with aggregated values. utils.update_attribute_table( layerpath=split_streams_layer, attribute_array=aggregated_properties, id_column=id_col, orig_columns=final_fields, ) # Remove extraneous columns required_columns = [id_col, ds_col, 'FID', 'Shape', 'Shape_Length', 'Shape_Area', 'OBJECTID'] fields_to_remove = filter( lambda name: name not in required_columns and name not in final_fields, [f.name for f in arcpy.ListFields(split_streams_layer)] ) utils.delete_columns(split_streams_layer, *fields_to_remove) return split_streams_layer class Propagator(base_tbx.BaseToolbox_Mixin): """ ArcGIS Python toolbox to propagate water quality metrics upstream through subcatchments in a watershed. Parameters ---------- None See also -------- Accumulator """ def __init__(self): """ Define the tool (tool name is the name of the class). 
""" # std attributes self.label = "1 - Propagate WQ scores to upstream subcatchments" self.description = dedent(""" TDB """) # lazy properties self._workspace = None self._subcatchments = None self._ID_column = None self._downstream_ID_column = None self._monitoring_locations = None self._ml_type_col = None self._included_ml_types = None self._value_columns = None self._output_layer = None self._streams = None self._add_output_to_map = None @property def monitoring_locations(self): """ The monitoring location points whose data will be propagated to the subcatchments. """ if self._monitoring_locations is None: self._monitoring_locations = arcpy.Parameter( displayName="Monitoring Locations", name="monitoring_locations", datatype="DEFeatureClass", parameterType="Required", direction="Input", multiValue=False ) self._set_parameter_dependency(self._monitoring_locations, self.workspace) return self._monitoring_locations @property def ml_type_col(self): if self._ml_type_col is None: self._ml_type_col = arcpy.Parameter( displayName="Monitoring Location Type Column", name="ml_type_col", datatype="Field", parameterType="Required", direction="Input", multiValue=False ) self._set_parameter_dependency(self._ml_type_col, self.monitoring_locations) return self._ml_type_col @property def included_ml_types(self): if self._included_ml_types is None: self._included_ml_types = arcpy.Parameter( displayName="Monitoring Location Types To Include", name="included_ml_types", datatype="GPString", parameterType="Required", direction="Input", multiValue=True, ) self._included_ml_types.filter.type = "ValueList" return self._included_ml_types @property def value_columns(self): """ The names of the fields to be propagated into upstream subcatchments. Note on property 'multiValue': it appears that by setting datatype to 'Value Table' the multiValue becomes irrevlant. Regardless on how we set the value here, when the function is called a False value is assigned to multiValue. However, the toolbox will still accept multiple entries. 
""" if self._value_columns is None: self._value_columns = arcpy.Parameter( displayName="Values to be Propagated", name="value_columns", datatype="Value Table", parameterType="Required", direction="Input", multiValue=True, ) self._value_columns.columns = [ ['String', 'Values To Propagate'], ['String', 'Aggregation Method'] ] self._set_parameter_dependency(self._value_columns, self.monitoring_locations) return self._value_columns def updateParameters(self, parameters): params = self._get_parameter_dict(parameters) param_vals = self._get_parameter_values(parameters) ws = param_vals.get('workspace', '.') vc = params['value_columns'] with utils.WorkSpace(ws): ml = param_vals['monitoring_locations'] if params['ml_type_col'].altered: col = param_vals['ml_type_col'] values = utils.unique_field_values(ml, col).tolist() params['included_ml_types'].filter.list = values if params['monitoring_locations'].value: agg_methods = analysis.AGG_METHOD_DICT.copy() agg_methods.pop('weighted_average', None) fields = analysis._get_wq_fields(ml, ['dry', 'wet']) self._set_filter_list(vc.filters[0], fields) self._set_filter_list(vc.filters[1], list(agg_methods.keys())) self._update_value_table_with_default(vc, 'average') def _params_as_list(self): params = [ self.workspace, self.subcatchments, self.ID_column, self.downstream_ID_column, self.monitoring_locations, self.ml_type_col, self.included_ml_types, self.value_columns, self.streams, self.output_layer, self.add_output_to_map, ] return params def analyze(self, **params): """ Propagates water quality scores from monitoring locations to upstream subcatchments. Calls directly to :func:`propagate`. """ # analysis options ws = params.pop('workspace', '.') overwrite = params.pop('overwrite', True) add_output_to_map = params.pop('add_output_to_map', False) output_layer = params.pop('output_layer', None) # subcatchment info sc = params.pop('subcatchments', None) ID_col = params.pop('ID_column', None) downstream_ID_col = params.pop('downstream_ID_column', None) # monitoring location info ml = params.pop('monitoring_locations', None) ml_type_col = params.pop('ml_type_col', None) included_ml_types = validate.non_empty_list( params.pop('included_ml_types', None), on_fail='create' ) # monitoring location type filter function if ml_type_col is not None and len(included_ml_types) > 0: ml_filter = lambda row: row[ml_type_col] in included_ml_types else: ml_filter = None # value columns and aggregations value_cols_string = params.pop('value_columns', None) value_columns = [vc.split(' ') for vc in value_cols_string.replace(' #', ' average').split(';')] # streams data streams = params.pop('streams', None) # perform the analysis with utils.WorkSpace(ws), utils.OverwriteState(overwrite): output_layers = propagate( subcatchments=sc, id_col=ID_col, ds_col=downstream_ID_col, monitoring_locations=ml, ml_filter=ml_filter, ml_filter_cols=ml_type_col, value_columns=value_columns, output_path=output_layer, streams=streams, verbose=True, asMessage=True, ) if add_output_to_map: for lyr in output_layers: self._add_to_map(lyr) return output_layers class Accumulator(base_tbx.BaseToolbox_Mixin): """ ArcGIS Python toolbox to accumulate subcatchments attributes and water quality parameters downstream through a stream. 
Parameters ---------- None See also -------- Propagator """ def __init__(self): """Define the tool (tool name is the name of the class).""" # std attributes self.label = "2 - Accumulate subcatchment properties to stream" self.description = dedent(""" TDB """) # lazy properties self._workspace = None self._subcatchments = None self._ID_column = None self._downstream_ID_column = None self._value_columns = None self._streams = None self._output_layer = None self._add_output_to_map = None def _params_as_list(self): params = [ self.workspace, self.subcatchments, self.ID_column, self.downstream_ID_column, self.value_columns, self.streams, self.output_layer, self.add_output_to_map, ] return params @property def value_columns(self): """ The names of the fields to be propagated into upstream subcatchments. Note on property 'multiValue': it appears that by setting datatype to 'Value Table' the multiValue becomes irrevlant. Regardless on how we set the value here, when the function is called a False value is assigned to multiValue. However, the toolbox will still accept multiple entries. """ if self._value_columns is None: self._value_columns = arcpy.Parameter( displayName="Values to be Accumulated", name="value_columns", datatype="Value Table", parameterType="Required", direction="Input", multiValue=True, ) self._value_columns.columns = [ ['String', 'Values To Accumulate'], ['String', 'Accumulation Method'], ['String', 'Weighting Factor'] ] self._set_parameter_dependency(self._value_columns, self.subcatchments) return self._value_columns def updateParameters(self, parameters): params = self._get_parameter_dict(parameters) param_vals = self._get_parameter_values(parameters) ws = param_vals.get('workspace', '.') vc = params['value_columns'] with utils.WorkSpace(ws): sc = param_vals['subcatchments'] # handles field name from Propagator output prefix = [i[0:3] for i in analysis.AGG_METHOD_DICT.keys()] # handles unmodified field name prefix.extend(['area', 'imp', 'dry', 'wet']) if params['subcatchments'].value: fields = analysis._get_wq_fields(sc, prefix) fields.append('n/a') self._set_filter_list(vc.filters[0], fields) self._set_filter_list(vc.filters[1], list(analysis.AGG_METHOD_DICT.keys())) self._set_filter_list(vc.filters[2], fields) self._update_value_table_with_default(vc, ['sum', 'n/a']) def analyze(self, **params): """ Accumulates subcatchments properties from upstream subcatchments into stream. Calls directly to :func:`accumulate`. """ # analysis options ws = params.pop('workspace', '.') overwrite = params.pop('overwrite', True) add_output_to_map = params.pop('add_output_to_map', False) # input parameters sc = params.pop('subcatchments', None) ID_col = params.pop('ID_column', None) downstream_ID_col = params.pop('downstream_ID_column', None) # value columns and aggregations value_cols_string = params.pop('value_columns', None) value_columns = [vc.split(' ') for vc in value_cols_string.replace(' #', ' average').split(';')] streams = params.pop('streams', None) output_layer = params.pop('output_layer', None) with utils.WorkSpace(ws), utils.OverwriteState(overwrite): output_layers = accumulate( subcatchments_layer=sc, id_col=ID_col, ds_col=downstream_ID_col, value_columns=value_columns, streams_layer=streams, output_layer=output_layer, verbose=True, asMessage=True, ) if add_output_to_map: self._add_to_map(output_layers) return output_layers
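The ``analyze()`` methods above rebuild the aggregation settings from ArcGIS's "Value Table" parameter, which arrives as a single string: the ``' #'`` placeholder is swapped for the default method, rows are split on ``';'``, and cells on spaces. Below is a minimal, self-contained sketch of that parsing (plain Python, no arcpy required); the field names in the sample strings are hypothetical.

def parse_value_table(value_cols_string, default='average'):
    """Sketch of the Value Table parsing used in the analyze() methods."""
    # '#' marks an unset cell in the ArcGIS parameter string.
    filled = value_cols_string.replace(' #', ' ' + default)
    return [row.split(' ') for row in filled.split(';')]


# Propagator-style rows: [field, aggregation method]
parse_value_table('Dry_Metals #;Wet_Metals median')
# -> [['Dry_Metals', 'average'], ['Wet_Metals', 'median']]

# Accumulator-style rows: [field, aggregation method, weighting factor]
parse_value_table('avgDry_Metals weighted_average Area;Imp_Cover sum n/a')
# -> [['avgDry_Metals', 'weighted_average', 'Area'], ['Imp_Cover', 'sum', 'n/a']]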
"""This module contains a collection of unit tests which validate the ..tor_async_google_pubsub module. """ import httplib import unittest import uuid import mock from . import AsyncHTTPClientPatcher from .. import tor_async_google from .. import tor_async_google_pubsub class AsyncActionTestCase(unittest.TestCase): pass class AsyncGeneratePubSubAccessTokenTestCase(unittest.TestCase): def test_ctr_without_async_state(self): credentials_filename = uuid.uuid4().hex async_action = tor_async_google_pubsub.AsyncGeneratePubSubAccessToken( credentials_filename) self.assertEqual(async_action.credentials_filename, credentials_filename) self.assertIsNotNone(async_action.scope) self.assertIsNone(async_action.async_state) def test_ctr_with_async_state(self): credentials_filename = uuid.uuid4().hex async_state = uuid.uuid4().hex async_action = tor_async_google_pubsub.AsyncGeneratePubSubAccessToken( credentials_filename, async_state) self.assertEqual(async_action.credentials_filename, credentials_filename) self.assertIsNotNone(async_action.scope) self.assertEqual(async_action.async_state, async_state) class AsyncGetTopicTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex agt = tor_async_google_pubsub.AsyncGetTopic( access_token, topic) self.assertEqual(agt.access_token, access_token) self.assertEqual(agt.topic, topic) self.assertIsNone(agt.async_state) self.assertIsNone(agt.get_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex async_state = uuid.uuid4().hex agt = tor_async_google_pubsub.AsyncGetTopic( access_token, topic, async_state) self.assertEqual(agt.access_token, access_token) self.assertEqual(agt.topic, topic) self.assertEqual(agt.async_state, async_state) self.assertIsNone(agt.get_failure_detail) def test_http_get_ok(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex agt = tor_async_google_pubsub.AsyncGetTopic( access_token, topic) callback = mock.Mock() agt.get(callback) callback.assert_called_once_with(True, True, agt) self.assertEqual(agt.get_failure_detail, type(agt).GFD_OK) def test_http_get_not_found(self): response = mock.Mock( code=httplib.NOT_FOUND, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex agt = tor_async_google_pubsub.AsyncGetTopic( access_token, topic) callback = mock.Mock() agt.get(callback) callback.assert_called_once_with(True, False, agt) self.assertEqual(agt.get_failure_detail, type(agt).GFD_OK) def test_http_get_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex agt = tor_async_google_pubsub.AsyncGetTopic( access_token, topic) callback = mock.Mock() agt.get(callback) callback.assert_called_once_with(False, None, agt) self.assertEqual(agt.get_failure_detail, type(agt).GFD_ERROR_GETTING_TOPIC) class AsyncCreateTopicTestCase(unittest.TestCase): def 
test_ctr_without_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex act = tor_async_google_pubsub.AsyncCreateTopic( access_token, topic) self.assertEqual(act.access_token, access_token) self.assertEqual(act.topic, topic) self.assertIsNone(act.async_state) self.assertIsNone(act.create_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex async_state = uuid.uuid4().hex act = tor_async_google_pubsub.AsyncCreateTopic( access_token, topic, async_state) self.assertEqual(act.access_token, access_token) self.assertEqual(act.topic, topic) self.assertEqual(act.async_state, async_state) self.assertIsNone(act.create_failure_detail) def test_http_put_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='PUT')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex act = tor_async_google_pubsub.AsyncCreateTopic( access_token, topic) callback = mock.Mock() act.create(callback) callback.assert_called_once_with(False, act) self.assertEqual(act.create_failure_detail, type(act).CFD_ERROR_CREATING_TOPIC) def test_happy_path(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='PUT')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex act = tor_async_google_pubsub.AsyncCreateTopic( access_token, topic) callback = mock.Mock() act.create(callback) callback.assert_called_once_with(True, act) self.assertEqual(act.create_failure_detail, type(act).CFD_OK) class AsyncDeleteTopicTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex adt = tor_async_google_pubsub.AsyncDeleteTopic( access_token, topic) self.assertEqual(adt.access_token, access_token) self.assertEqual(adt.topic, topic) self.assertIsNone(adt.async_state) self.assertIsNone(adt.delete_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex async_state = uuid.uuid4().hex adt = tor_async_google_pubsub.AsyncDeleteTopic( access_token, topic, async_state) self.assertEqual(adt.access_token, access_token) self.assertEqual(adt.topic, topic) self.assertEqual(adt.async_state, async_state) self.assertIsNone(adt.delete_failure_detail) def test_http_delete_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='DELETE')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex adt = tor_async_google_pubsub.AsyncDeleteTopic( access_token, topic) callback = mock.Mock() adt.delete(callback) callback.assert_called_once_with(False, adt) self.assertEqual(adt.delete_failure_detail, type(adt).DFD_ERROR_DELETING_TOPIC) def test_happy_path(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='DELETE')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex adt = tor_async_google_pubsub.AsyncDeleteTopic( access_token, topic) callback = mock.Mock() adt.delete(callback) 
callback.assert_called_once_with(True, adt) self.assertEqual(adt.delete_failure_detail, type(adt).DFD_OK) class AsyncGetSubscriptionTopicTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex subscription = uuid.uuid4().hex ags = tor_async_google_pubsub.AsyncGetSubscription( access_token, subscription) self.assertEqual(ags.access_token, access_token) self.assertEqual(ags.subscription, subscription) self.assertIsNone(ags.async_state) self.assertIsNone(ags.get_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex subscription = uuid.uuid4().hex async_state = uuid.uuid4().hex ags = tor_async_google_pubsub.AsyncGetSubscription( access_token, subscription, async_state) self.assertEqual(ags.access_token, access_token) self.assertEqual(ags.subscription, subscription) self.assertEqual(ags.async_state, async_state) self.assertIsNone(ags.get_failure_detail) def test_http_get_ok(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) subscription = uuid.uuid4().hex ags = tor_async_google_pubsub.AsyncGetSubscription( access_token, subscription) callback = mock.Mock() ags.get(callback) callback.assert_called_once_with(True, True, ags) self.assertEqual(ags.get_failure_detail, type(ags).GFD_OK) def test_http_get_not_found(self): response = mock.Mock( code=httplib.NOT_FOUND, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) subscription = uuid.uuid4().hex ags = tor_async_google_pubsub.AsyncGetSubscription( access_token, subscription) callback = mock.Mock() ags.get(callback) callback.assert_called_once_with(True, False, ags) self.assertEqual(ags.get_failure_detail, type(ags).GFD_OK) def test_http_get_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='GET')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) subscription = uuid.uuid4().hex ags = tor_async_google_pubsub.AsyncGetSubscription( access_token, subscription) callback = mock.Mock() ags.get(callback) callback.assert_called_once_with(False, None, ags) self.assertEqual(ags.get_failure_detail, type(ags).GFD_ERROR_GETTING_SUBSCRIPTION) class AsyncCreatePushSubscriptionTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex subscription = uuid.uuid4().hex ack_deadline_in_seconds = uuid.uuid4().hex push_endpoint = uuid.uuid4().hex acs = tor_async_google_pubsub.AsyncCreatePushSubscription( access_token, topic, subscription, ack_deadline_in_seconds, push_endpoint) self.assertEqual(acs.access_token, access_token) self.assertEqual(acs.topic, topic) self.assertEqual(acs.subscription, subscription) self.assertEqual(acs.ack_deadline_in_seconds, ack_deadline_in_seconds) self.assertEqual(acs.push_endpoint, push_endpoint) self.assertIsNone(acs.async_state) self.assertIsNone(acs.create_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex subscription = uuid.uuid4().hex ack_deadline_in_seconds = uuid.uuid4().hex push_endpoint = 
uuid.uuid4().hex async_state = uuid.uuid4().hex acs = tor_async_google_pubsub.AsyncCreatePushSubscription( access_token, topic, subscription, ack_deadline_in_seconds, push_endpoint, async_state) self.assertEqual(acs.access_token, access_token) self.assertEqual(acs.topic, topic) self.assertEqual(acs.subscription, subscription) self.assertEqual(acs.ack_deadline_in_seconds, ack_deadline_in_seconds) self.assertEqual(acs.push_endpoint, push_endpoint) self.assertEqual(acs.async_state, async_state) self.assertIsNone(acs.create_failure_detail) def test_http_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='PUT')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex subscription = uuid.uuid4().hex ack_deadline_in_seconds = uuid.uuid4().hex push_endpoint = uuid.uuid4().hex acs = tor_async_google_pubsub.AsyncCreatePushSubscription( access_token, topic, subscription, ack_deadline_in_seconds, push_endpoint) callback = mock.Mock() acs.create(callback) callback.assert_called_once_with(False, acs) self.assertEqual(acs.create_failure_detail, type(acs).CFD_ERROR_CREATING_SUBSCRIPTION) def test_happy_path(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='PUT')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) topic = uuid.uuid4().hex subscription = uuid.uuid4().hex ack_deadline_in_seconds = uuid.uuid4().hex push_endpoint = uuid.uuid4().hex acs = tor_async_google_pubsub.AsyncCreatePushSubscription( access_token, topic, subscription, ack_deadline_in_seconds, push_endpoint) callback = mock.Mock() acs.create(callback) callback.assert_called_once_with(True, acs) self.assertEqual(acs.create_failure_detail, type(acs).CFD_OK) class AsyncDeleteSubscriptionTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex subscription = uuid.uuid4().hex ads = tor_async_google_pubsub.AsyncDeleteSubscription( access_token, subscription) self.assertEqual(ads.access_token, access_token) self.assertEqual(ads.subscription, subscription) self.assertIsNone(ads.async_state) self.assertIsNone(ads.delete_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex subscription = uuid.uuid4().hex async_state = uuid.uuid4().hex ads = tor_async_google_pubsub.AsyncDeleteSubscription( access_token, subscription, async_state) self.assertEqual(ads.access_token, access_token) self.assertEqual(ads.subscription, subscription) self.assertEqual(ads.async_state, async_state) self.assertIsNone(ads.delete_failure_detail) def test_http_error(self): response = mock.Mock( code=httplib.INTERNAL_SERVER_ERROR, headers={}, time_info={}, request_time=0.042, request=mock.Mock(method='DELETE')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) subscription = uuid.uuid4().hex ads = tor_async_google_pubsub.AsyncDeleteSubscription( access_token, subscription) callback = mock.Mock() ads.delete(callback) callback.assert_called_once_with(False, ads) self.assertEqual(ads.delete_failure_detail, type(ads).DFD_ERROR_DELETING_SUBSCRIPTION) def test_happy_path(self): response = mock.Mock( code=httplib.OK, headers={}, time_info={}, request_time=0.042, 
request=mock.Mock(method='DELETE')) with AsyncHTTPClientPatcher([response]): access_token = tor_async_google.RegeneratingAccessToken(uuid.uuid4().hex, uuid.uuid4().hex) subscription = uuid.uuid4().hex ads = tor_async_google_pubsub.AsyncDeleteSubscription( access_token, subscription) callback = mock.Mock() ads.delete(callback) callback.assert_called_once_with(True, ads) self.assertEqual(ads.delete_failure_detail, type(ads).DFD_OK) class AsyncPublishMessageTestCase(unittest.TestCase): def test_ctr_without_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex data = uuid.uuid4().hex apm = tor_async_google_pubsub.AsyncPublishMessage( access_token, topic, data) self.assertEqual(apm.access_token, access_token) self.assertEqual(apm.topic, topic) self.assertEqual(apm.data, data) self.assertIsNone(apm.async_state) self.assertIsNone(apm.publish_failure_detail) def test_ctr_with_async_state(self): access_token = uuid.uuid4().hex topic = uuid.uuid4().hex data = uuid.uuid4().hex async_state = uuid.uuid4().hex apm = tor_async_google_pubsub.AsyncPublishMessage( access_token, topic, data, async_state) self.assertEqual(apm.access_token, access_token) self.assertEqual(apm.topic, topic) self.assertEqual(apm.data, data) self.assertEqual(apm.async_state, async_state) self.assertIsNone(apm.publish_failure_detail)
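    # A hedged sketch of a happy-path publish test, following the same
    # AsyncHTTPClientPatcher pattern as the create/delete test cases above.
    # NOTE: the method name ``publish()``, the POST verb, and the ``PFD_OK``
    # constant are assumptions inferred from the naming conventions of the
    # other async actions (create()/CFD_OK, delete()/DFD_OK); they are not
    # confirmed by this module and may need adjusting.
    def test_happy_path_sketch(self):
        response = mock.Mock(
            code=httplib.OK,
            headers={},
            time_info={},
            request_time=0.042,
            request=mock.Mock(method='POST'))
        with AsyncHTTPClientPatcher([response]):
            access_token = tor_async_google.RegeneratingAccessToken(
                uuid.uuid4().hex, uuid.uuid4().hex)
            topic = uuid.uuid4().hex
            data = uuid.uuid4().hex
            apm = tor_async_google_pubsub.AsyncPublishMessage(
                access_token, topic, data)
            callback = mock.Mock()
            apm.publish(callback)
            callback.assert_called_once_with(True, apm)
            self.assertEqual(apm.publish_failure_detail, type(apm).PFD_OK)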
#!/usr/bin/env python """The various output plugins for GenericHunts.""" import csv import threading import urllib from grr.lib import aff4 from grr.lib import config_lib from grr.lib import email_alerts from grr.lib import export from grr.lib import rdfvalue from grr.lib import registry from grr.lib import rendering from grr.lib import utils from grr.proto import flows_pb2 class HuntOutputPlugin(object): """The base class for output plugins. The way output plugins work is that for each result a hunt produces, all its registered output plugins get handed the result to store it in the respective format in turn. The methods a plugin has to provide are ProcessResponse which gets handed the actual result to process and Flush which is called before the output plugin is about to be pickled and stored in the database. """ __metaclass__ = registry.MetaclassRegistry name = "" description = "" args_type = None def __init__(self, collection_urn, args=None, token=None, state=None): """HuntOutputPlugin constructor. HuntOutputPlugin constructor is called during StartHuntFlow and therefore runs with security checks enabled (if they're enabled in the config). Therefore it's a bad idea to write anything to AFF4 in the constructor. Args: collection_urn: URN of the collection which results are going to be processed. args: This plugin's arguments. token: Security token. state: Instance of rdfvalue.FlowState. Represents plugin's state. If this is passed, no initialization will be performed, only the state will be applied. Raises: ValueError: when state argument is passed together with args or token arguments. """ if state and (token or args): raise ValueError("'state' argument can't be passed together with 'args' " "or 'token'.") if not state: self.state = state or rdfvalue.FlowState() self.state.Register("collection_urn", collection_urn) self.state.Register("args", args) self.state.Register("token", token) self.Initialize() else: self.state = state self.args = self.state.args self.token = self.state.token self.lock = threading.RLock() def Initialize(self): """Initializes the hunt output plugin. Initialize() is called when hunt is created. It can be used to register state variables. It's called on the worker, so no security checks apply. """ def ProcessResponses(self, responses): """Processes bunch of responses. Multiple ProcessResponses() calls can be done in a row. They're *always* followed by a Flush() call. ProcessResponses() is called on the worker, so no security checks apply. NOTE: this method should be thread-safe as it may be called from multiple threads to improve hunt output performance. Args: responses: GrrMessages from the hunt results collection. """ raise NotImplementedError() def Flush(self): """Flushes the output plugin's state. Flush is *always* called after a series of ProcessResponses() calls. Flush() is called on the worker, so no security checks apply. NOTE: This method doesn't have to be thread-safe as it's called after all ProcessResponses() calls are complete. """ pass class EmailPluginArgs(rdfvalue.RDFProtoStruct): protobuf = flows_pb2.EmailPluginArgs class EmailPlugin(HuntOutputPlugin): """An output plugin that sends an email for each response received.""" name = "email" description = "Send an email for each result." 
args_type = EmailPluginArgs template = """ <html><body><h1>GRR Hunt's results collection %(collection_urn)s got a new result.</h1> <p> Grr Hunt's results collection %(collection_urn)s just got a response from client %(client_id)s (%(hostname)s): <br /> <br /> %(response)s <br /> Click <a href='%(admin_ui)s/#%(urn)s'> here </a> to access this machine. <br /> This notification was created by %(creator)s. </p> %(additional_message)s <p>Thanks,</p> <p>%(signature)s</p> </body></html>""" too_many_mails_msg = ("<p> This hunt has now produced %d results so the " "sending of emails will be disabled now. </p>") def Initialize(self): self.state.Register("emails_sent", 0) super(EmailPlugin, self).Initialize() def ProcessResponse(self, response): """Sends an email for each response.""" if self.state.emails_sent >= self.state.args.email_limit: return client_id = response.source client = aff4.FACTORY.Open(client_id, token=self.token) hostname = client.Get(client.Schema.HOSTNAME) or "unknown hostname" subject = ("GRR Hunt results collection %s got a new result." % self.state.collection_urn) url = urllib.urlencode((("c", client_id), ("main", "HostInformation"))) response_htm = rendering.FindRendererForObject(response).RawHTML() self.state.emails_sent += 1 if self.state.emails_sent == self.state.args.email_limit: additional_message = self.too_many_mails_msg % self.state.args.email_limit else: additional_message = "" email_alerts.SendEmail( self.state.args.email, "grr-noreply", subject, self.template % dict( client_id=client_id, admin_ui=config_lib.CONFIG["AdminUI.url"], hostname=hostname, urn=url, creator=self.token.username, collection_urn=self.state.collection_urn, response=response_htm, additional_message=additional_message, signature=config_lib.CONFIG["Email.signature"] ), is_html=True) @utils.Synchronized def ProcessResponses(self, responses): for response in responses: self.ProcessResponse(response) class CSVOutputPluginArgs(rdfvalue.RDFProtoStruct): protobuf = flows_pb2.CSVOutputPluginArgs class CSVOutputPlugin(HuntOutputPlugin): """Hunt output plugin that writes hunt's results to CSV file on AFF4. CSV files are written incrementally. After every batch of results is written, the file can be downloaded. TODO(user): add support for zipped CSV files. Produce compressed CSV files while retaining the capability to do incremental updates and have files in downloadable state after every update is not exactly trivial. """ name = "csv" description = "Write CSV file to AFF4" args_type = CSVOutputPluginArgs def Initialize(self): super(CSVOutputPlugin, self).Initialize() self.state.Register("files_by_type", {}) self.state.Register("last_updated", rdfvalue.RDFDatetime().Now()) def ProcessResponses(self, responses): default_metadata = rdfvalue.ExportedMetadata( annotations=u",".join(self.state.args.export_options.annotations), source_urn=self.state.collection_urn) if self.state.args.convert_values: # This is thread-safe - we just convert the values. converted_responses = export.ConvertValues( default_metadata, responses, token=self.state.token, options=self.state.args.export_options) else: converted_responses = responses # This is not thread-safe, therefore WriteValueToCSVFile is synchronized. 
self.WriteValuesToCSVFile(converted_responses) def GetCSVHeader(self, value_class, prefix=""): header = [] for type_info in value_class.type_infos: if type_info.__class__.__name__ == "ProtoEmbedded": header.extend( self.GetCSVHeader(type_info.type, prefix=type_info.name + ".")) else: header.append(prefix + type_info.name) return header def WriteCSVHeader(self, output_file, value_type): value_class = rdfvalue.RDFValue.classes[value_type] csv.writer(output_file).writerow(self.GetCSVHeader(value_class)) def GetCSVRow(self, value): row = [] for type_info in value.__class__.type_infos: if type_info.__class__.__name__ == "ProtoEmbedded": row.extend(self.GetCSVRow(value.Get(type_info.name))) else: row.append(value.Get(type_info.name)) return row def WriteCSVRow(self, output_file, value): csv.writer(output_file).writerow(self.GetCSVRow(value)) def GetOutputFile(self, value_type): """Initializes output AFF4Image for a given value type.""" try: output_file = self.state.files_by_type[value_type] except KeyError: if self.state.args.output_dir: output_urn = self.state.args.output_dir.Add(value_type + ".csv") output_file = aff4.FACTORY.Create(output_urn, "AFF4Image", token=self.token) else: output_file = aff4.FACTORY.Create(None, "TempImageFile", token=self.token) output_file.urn = output_file.urn.Add(value_type + ".csv") self.WriteCSVHeader(output_file, value_type) self.state.files_by_type[value_type] = output_file return output_file @utils.Synchronized def WriteValuesToCSVFile(self, values): for value in values: output_file = self.GetOutputFile(value.__class__.__name__) self.WriteCSVRow(output_file, value) def Flush(self): for output_file in self.state.files_by_type.values(): output_file.Flush() self.last_updated = rdfvalue.RDFDatetime().Now() class OutputPlugin(rdfvalue.RDFProtoStruct): """A proto describing the output plugin to create.""" protobuf = flows_pb2.OutputPlugin def GetPluginArgsClass(self): plugin_cls = HuntOutputPlugin.classes.get(self.plugin_name) if plugin_cls is not None: return plugin_cls.args_type def GetPluginForHunt(self, hunt_obj): cls = HuntOutputPlugin.classes.get(self.plugin_name) if cls is None: raise KeyError("Unknown output plugin %s" % self.plugin_name) return cls(hunt_obj.state.context.results_collection_urn, args=self.plugin_args, token=hunt_obj.token) def GetPluginForState(self, plugin_state): cls = HuntOutputPlugin.classes.get(self.plugin_name) if cls is None: raise KeyError("Unknown output plugin %s" % self.plugin_name) return cls(None, state=plugin_state)
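The base-class docstrings above spell out the plugin contract: Initialize() registers state when the hunt is created, ProcessResponses() must be thread-safe, and Flush() runs after every batch. Below is a minimal, hypothetical plugin sketch that follows that contract using only the hooks already shown in this module (self.state.Register, utils.Synchronized); it is an illustration, not part of GRR.

class CountingPlugin(HuntOutputPlugin):
    """Toy output plugin that only counts hunt results. Illustrative sketch."""

    name = "counting_sketch"
    description = "Count hunt results (illustration only, not a real plugin)."

    def Initialize(self):
        # Runs on the worker when the hunt is created; register state so it
        # survives the pickle/unpickle cycle between batches.
        super(CountingPlugin, self).Initialize()
        self.state.Register("seen", 0)

    @utils.Synchronized
    def ProcessResponses(self, responses):
        # May be called concurrently from several worker threads, hence the
        # synchronization, mirroring EmailPlugin.ProcessResponses above.
        self.state.seen += len(list(responses))

    def Flush(self):
        # Always called after a series of ProcessResponses() calls; a real
        # plugin would persist its accumulated output to AFF4 here.
        pass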
import json import logging import os import socket import StringIO import django from django.conf import settings from django.contrib.sites.models import Site from django.http import (HttpResponsePermanentRedirect, HttpResponseRedirect, HttpResponse, Http404) from django.shortcuts import render from django.utils.translation import ugettext as _ from django.views.decorators.cache import never_cache from django.views.decorators.http import require_GET from celery.messaging import establish_connection from mobility.decorators import mobile_template from PIL import Image from kitsune.lib.sumo_locales import LOCALES from kitsune.search import es_utils from kitsune.sumo.decorators import cors_enabled from kitsune.sumo.redis_utils import redis_client, RedisError from kitsune.sumo.urlresolvers import reverse from kitsune.sumo.utils import get_next_url, uselocale from kitsune.users.forms import AuthenticationForm log = logging.getLogger('k.services') @never_cache @mobile_template('sumo/{mobile/}locales.html') def locales(request, template): """The locale switcher page.""" return render(request, template, dict( next_url=get_next_url(request) or reverse('home'))) def geoip_suggestion(request): """ Ajax view to return the localized text for GeoIP locale change suggestion. Takes one parameter from the querystring: * locales - a form encoded list of locales to translate to. Example url: /localize?locales[]=es&locales[]=en-US """ locales = request.GET.getlist('locales[]') response = {'locales': {}} for locale in locales: # English and native names for the language response['locales'][locale] = LOCALES[locale] with uselocale(locale): # This is using our JS-style string formatting. response[locale] = { 'suggestion': _('Would you like to view this page in ' '%(language)s instead?'), 'confirm': _('Yes'), 'cancel': _('No'), } return HttpResponse(json.dumps(response), content_type='application/json') def handle403(request): """A 403 message that looks nicer than the normal Apache forbidden page""" no_cookies = False referer = request.META.get('HTTP_REFERER') if referer: no_cookies = (referer.endswith(reverse('users.login'))) return render(request, 'handlers/403.html', { 'form': AuthenticationForm(), 'no_cookies': no_cookies}, status=403) def handle404(request, *args, **kwargs): """A handler for 404s""" return render(request, 'handlers/404.html', status=404) def handle500(request): """A 500 message that looks nicer than the normal Apache error page""" return render(request, 'handlers/500.html', status=500) def redirect_to(request, url, permanent=True, **kwargs): """Like Django's redirect_to except that 'url' is passed to reverse.""" dest = reverse(url, kwargs=kwargs) if permanent: return HttpResponsePermanentRedirect(dest) return HttpResponseRedirect(dest) def deprecated_redirect(request, url, **kwargs): """Redirect with an interstitial page telling folks to update their bookmarks. """ dest = reverse(url, kwargs=kwargs) proto = 'https://' if request.is_secure() else 'http://' host = Site.objects.get_current().domain return render(request, 'sumo/deprecated.html', { 'dest': dest, 'proto': proto, 'host': host}) def robots(request): """Generate a robots.txt.""" if not settings.ENGAGE_ROBOTS: template = 'User-Agent: *\nDisallow: /' else: template = render(request, 'sumo/robots.html') return HttpResponse(template, content_type='text/plain') def test_memcached(host, port): """Connect to memcached. :returns: True if test passed, False if test failed. 
""" try: s = socket.socket() s.connect((host, port)) return True except Exception as exc: log.critical('Failed to connect to memcached (%r): %s' % ((host, port), exc)) return False finally: s.close() ERROR = 'ERROR' INFO = 'INFO' @never_cache def monitor(request): """View for services monitor.""" status = {} # Note: To add a new component to the services monitor, do your # testing and then add a name -> list of output tuples map to # status. # Check memcached. memcache_results = [] try: for cache_name, cache_props in settings.CACHES.items(): result = True backend = cache_props['BACKEND'] location = cache_props['LOCATION'] # LOCATION can be a string or a list of strings if isinstance(location, basestring): location = location.split(';') if 'memcache' in backend: for loc in location: # TODO: this doesn't handle unix: variant ip, port = loc.split(':') result = test_memcached(ip, int(port)) memcache_results.append( (INFO, '%s:%s %s' % (ip, port, result))) if not memcache_results: memcache_results.append((ERROR, 'memcache is not configured.')) elif len(memcache_results) < 2: memcache_results.append( (ERROR, ('You should have at least 2 memcache servers. ' 'You have %s.' % len(memcache_results)))) else: memcache_results.append((INFO, 'memcached servers look good.')) except Exception as exc: memcache_results.append( (ERROR, 'Exception while looking at memcached: %s' % str(exc))) status['memcached'] = memcache_results # Check Libraries and versions libraries_results = [] try: Image.new('RGB', (16, 16)).save(StringIO.StringIO(), 'JPEG') libraries_results.append((INFO, 'PIL+JPEG: Got it!')) except Exception as exc: libraries_results.append( (ERROR, 'PIL+JPEG: Probably missing: ' 'Failed to create a jpeg image: %s' % exc)) status['libraries'] = libraries_results # Check file paths. msg = 'We want read + write.' filepaths = ( (settings.USER_AVATAR_PATH, os.R_OK | os.W_OK, msg), (settings.IMAGE_UPLOAD_PATH, os.R_OK | os.W_OK, msg), (settings.THUMBNAIL_UPLOAD_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_IMAGE_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_IMAGE_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_VIDEO_PATH, os.R_OK | os.W_OK, msg), (settings.GALLERY_VIDEO_THUMBNAIL_PATH, os.R_OK | os.W_OK, msg), (settings.GROUP_AVATAR_PATH, os.R_OK | os.W_OK, msg), ) filepath_results = [] for path, perms, notes in filepaths: path = os.path.join(settings.MEDIA_ROOT, path) path_exists = os.path.isdir(path) path_perms = os.access(path, perms) if path_exists and path_perms: filepath_results.append( (INFO, '%s: %s %s %s' % (path, path_exists, path_perms, notes))) status['filepaths'] = filepath_results # Check RabbitMQ. rabbitmq_results = [] try: rabbit_conn = establish_connection(connect_timeout=5) rabbit_conn.connect() rabbitmq_results.append( (INFO, 'Successfully connected to RabbitMQ.')) except (socket.error, IOError) as exc: rabbitmq_results.append( (ERROR, 'Error connecting to RabbitMQ: %s' % str(exc))) except Exception as exc: rabbitmq_results.append( (ERROR, 'Exception while looking at RabbitMQ: %s' % str(exc))) status['RabbitMQ'] = rabbitmq_results # Check ES. 
es_results = [] try: es_utils.get_doctype_stats(es_utils.all_read_indexes()[0]) es_results.append( (INFO, ('Successfully connected to ElasticSearch and index ' 'exists.'))) except es_utils.ES_EXCEPTIONS as exc: es_results.append( (ERROR, 'ElasticSearch problem: %s' % str(exc))) except Exception as exc: es_results.append( (ERROR, 'Exception while looking at ElasticSearch: %s' % str(exc))) status['ElasticSearch'] = es_results # Check Celery. # start = time.time() # pong = celery.task.ping() # rabbit_results = r = {'duration': time.time() - start} # status_summary['rabbit'] = pong == 'pong' and r['duration'] < 1 # Check Redis. redis_results = [] if hasattr(settings, 'REDIS_BACKENDS'): for backend in settings.REDIS_BACKENDS: try: redis_client(backend) redis_results.append((INFO, '%s: Pass!' % backend)) except RedisError: redis_results.append((ERROR, '%s: Fail!' % backend)) status['Redis'] = redis_results status_code = 200 status_summary = {} for component, output in status.items(): if ERROR in [item[0] for item in output]: status_code = 500 status_summary[component] = False else: status_summary[component] = True return render(request, 'services/monitor.html', { 'component_status': status, 'status_summary': status_summary}, status=status_code) @never_cache def error(request): if not getattr(settings, 'STAGE', False): raise Http404 # Do something stupid. fu # noqa @require_GET @never_cache def version_check(request): content_type = 'application/x-json' token = settings.VERSION_CHECK_TOKEN if (token is None or 'token' not in request.GET or token != request.GET['token']): return HttpResponse(status=403, content_type=content_type) versions = { 'django': '.'.join(map(str, django.VERSION)), } return HttpResponse(json.dumps(versions), content_type=content_type) @cors_enabled('*') def serve_cors(*args, **kwargs): """A wrapper around django.views.static.serve that adds CORS headers.""" if not settings.DEBUG: raise RuntimeError("Don't use kitsune.sumo.views.serve_cors " "in production.") from django.views.static import serve return serve(*args, **kwargs)
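Per the note inside monitor(), a new component is added as a "name -> list of (level, message) tuples" entry in ``status``, which the final loop folds into the HTTP status (any ERROR entry turns the response into a 500). The following is a small sketch of that convention with a hypothetical "Disk space" component; it reuses the module-level ERROR/INFO constants and does not touch Django.

def _monitor_component_sketch():
    """Illustrative only: the component -> [(level, msg)] convention."""
    status = {}

    # Hypothetical new component: free disk space (the values are made up).
    disk_results = []
    free_gb = 12
    if free_gb < 5:
        disk_results.append((ERROR, 'Only %s GB free.' % free_gb))
    else:
        disk_results.append((INFO, '%s GB free.' % free_gb))
    status['Disk space'] = disk_results

    # Same folding logic as the end of monitor(): any ERROR -> 500.
    status_code = 200
    status_summary = {}
    for component, output in status.items():
        if ERROR in [item[0] for item in output]:
            status_code = 500
            status_summary[component] = False
        else:
            status_summary[component] = True
    return status_code, status_summary  # -> (200, {'Disk space': True})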
#!/usr/bin/env python import re,os from exceptions import IndexError from optparse import OptionParser import ConfigParser import numpy as N import numpy.fft as FT from scipy.optimize import brentq,fmin,anneal try: import tables HAS_TABLES=True except: HAS_TABLES=False def write_parameter_file(filename): config = ConfigParser.RawConfigParser() config.add_section('Spectrum') cmd_opt_dict = vars(options) for option in cmd_opt_dict: config.set('Spectrum', option, cmd_opt_dict[option]) cfgfile = open(filename,'w') config.write(cfgfile) cfgfile.close() return typ = re.compile("'.*'") try: import matplotlib.pyplot as P pyl = True except: pyl = None parser = OptionParser(version="%prog 0.5") parser.add_option("--area", dest="normalize_area", help="Normalize area under the spectrum", metavar="AREA", action="store_true", default=False) parser.add_option("-b", "--batch", dest="batch", help="No plots, batch processing", metavar="BATCH", action="store_true", default=False) parser.add_option("--baseline", type="int", dest="baseline", default=0, metavar="N", help="Apply baseline correction to the last N points") parser.add_option("-c", "--start", type="int", dest="start", default=-1, help="Start point of FFT") parser.add_option("-e", "--end", type="int", dest="end", default=-1, metavar="END", help="Skip points past END") parser.add_option("-f", "--file", dest="infilename", help="Read data from INFILE, can be HDF5 file", metavar="INFILE") parser.add_option("--filter", type="float", dest="filter", default=0, help="Low pass filter in nyquist frequency") parser.add_option("-l", "--lb", type="float", dest="lb", default=0, help="Line broadening factor for windowing function") parser.add_option("-m", "--method", dest="method", help="Phasing method: maxent, maxent2 or simple", default = "simple", metavar="METHOD") parser.add_option("--mask", dest="mask", help="Save only spectra from -280e3 to 280e3", metavar="MASK", action="store_true", default=False) parser.add_option("--maximum", dest="normalize_maximum", help="Normalize spectrum to maximum value", metavar="MAXIMUM", action="store_true", default=False) parser.add_option("-n", "--npoints", type="int", dest="npoints", default=0, help="Number of additional points to leave out (can be negative)") parser.add_option("-o", "--out", dest="outfilename", help="Write data to OUTFILE", metavar="OUTFILE") parser.add_option("--write-parameter-file", "--wpf", dest="parameterfilename", help="Write data to PARAMETERFILE", metavar="PARAMETERFILE") parser.add_option("-p", "--phase", type="float", dest="phase", default=None, help="Phase") parser.add_option("-r", "--read-parameter-file", dest="pfile", help="Write data to PARAMETERFILE", metavar="PARAMETERFILE") parser.add_option("-s", "--swap", dest="swapchannels", help="Swapping real and imaginary part. 
Usually ch0 is real and ch1 is imag", action="store_true", default=False) parser.add_option("--std", dest="standard", help="Do not ask for data set in HDF5 files", action="store_true", default=False) parser.add_option("-z", "--zero", type="int", dest="zero", default=-1, metavar='NUM', help="Filling with NUM zeroes, if NUM=0 no zero filling, NUM < 0 find points for fast FFT") (options, args) = parser.parse_args() if options.pfile: config = ConfigParser.RawConfigParser() config.read(options.pfile) opts_from_file = config.options('Spectrum') cmd_opt_dict = vars(options) exclude_from_override = ['infilename', 'outfilename', 'pfile','parameterfilename','swapchannels','batch'] opts_from_file_dict = {} for an_opt in cmd_opt_dict.keys(): an_opt_val = config.get('Spectrum', an_opt) opts_from_file_dict[an_opt]=an_opt_val for an_opt in cmd_opt_dict.keys(): # print cmd_opt_dict.keys() # print opts_from_file_dict.keys() if not an_opt in exclude_from_override: print "(INFO) Overriding %s with %s"%(an_opt, opts_from_file_dict[an_opt]) options.__dict__[an_opt] = opts_from_file_dict[an_opt] print "\n(INFO) Reading in file %s ...\n"%(options.infilename) attributes = {} tau = 0 num_max = 0 if HAS_TABLES: if tables.isHDF5File(options.infilename): NOT_HDF=False h = tables.openFile(options.infilename) table_list = [f for f in h.walkGroups(h.root.data_pool) if f._v_children.has_key('accu_data')] print "Found following accu_data objects:\n\n" for i,tb in enumerate(table_list): print "\tNumber:",i, tb for key in tb._v_attrs._v_attrnamesuser: val = tb._f_getAttr(key) print "\t\t",key, '\t',val if key.endswith('tau'): if float(tb._f_getAttr(key)) > float(tau): #print "*** Was",tau tau = val #print "*** Now",tau num_max = i print if len(table_list) > 1: if options.standard: d=num_max else: d = raw_input('Which one?: [%i]'%num_max) else: d = 0 if d == '': d = num_max else: d = int(d) print "Using Number %i ..."%d for attribute in table_list[d]._v_attrs._v_attrnamesuser: attributes[attribute] = table_list[d]._f_getAttr(attribute) timeline = table_list[d].accu_data.read() dwell = table_list[d].indices.col('dwelltime') x = N.arange(timeline.shape[0])*dwell rmean = timeline[:,0] if timeline.shape[1] > 2: imean = timeline[:,2] else: imean = timeline[:,1] else: NOT_HDF=True if NOT_HDF or not HAS_TABLES: skiprows=0 comments="#" f = open(options.infilename, "U") line=f.readline().strip() if line.startswith("SIMP"): print "(INFO) SIMPSON file found" comments="END" f.seek(0) while line[0].isalpha(): line = f.readline().strip() skiprows += 1 if line.startswith("SW="): sw=float(line[3:]) print "(INFO) Spectral width: %.1f MHz"%(sw/1e6) f.close() datafile = N.loadtxt(options.infilename, skiprows=skiprows, comments=comments) print "(INFO) Data array has shape:",datafile.shape if options.batch: datasets = "1" else: datasets = raw_input("How many datasets are there?: ") if datasets == "1" or datasets == '': usethis=0 datasets=1 else: usethis = int(raw_input("Which one to use (0 to %i)?: "%(datasets-1))) datasets = int(datasets) num = datafile.shape[0]/int(datasets) s = num*usethis e = num*(usethis+1) if datafile.shape[1] == 5: x = datafile[s:e,0] rmean = datafile[s:e,1] rsigma = datafile[s:e,2] imean = datafile[s:e,3] isignam = datafile[s:e,4] dwell = x[1]-x[0] elif datafile.shape[1] == 3: x = datafile[s:e,0] rmean = datafile[s:e,1] imean = datafile[s:e,2] dwell = x[1]-x[0] elif datafile.shape[1] == 2: x = N.linspace(0,num/sw,num) rmean = datafile[s:e,0] imean = datafile[s:e,1] dwell = 1/sw else: raise ValueError # not needed anymore 
del datafile if options.swapchannels: temp = rmean[:] rmean = imean[:] imean = temp[:] del temp # Speed up FFT by estimating a good number of points def find_good_npoints(n): fft_len=1<<int(N.floor(N.log2(n))) if fft_len%2==0 and fft_len/2*3<=n: fft_len=fft_len/2*3 # 1.50 if fft_len%512==0 and fft_len/512*729<=n: fft_len=fft_len/512*729 # 1.42 if fft_len%64==0 and fft_len/64*81<=n: fft_len=fft_len/64*81 #1.26 if fft_len%8==0 and fft_len/8*9<=n: fft_len=fft_len/8*9 # 1.125 return fft_len def filter(data, freq): import scipy.signal as S b,a = S.butter(7,freq) data.real = S.filtfilt(b,a,data.real) data.imag = S.filtfilt(b,a,data.imag) return data # Data filtering data = 1j*N.array(imean)+N.array(rmean) if options.filter > 0: print "(INFO) Filtering data with low pass filter: %.3f Hz"%(options.filter) data = filter(data, options.filter*dwell) extra_points=int(options.npoints) if int(options.start) >= 0: r_start = int(options.start) else: r_start = data.real.argmax()+extra_points r_end = int(options.end) print "Skipping first %i points of data"%r_start if r_start > len(data): raise IndexError,"More points left out than data points exist!" usable_data = data[r_start:r_end] if options.baseline > 0: usable_data -= data[-int(options.baseline):].mean() n = len(usable_data) if options.zero > 0: fft_len=find_good_npoints(len(usable_data)+int(options.zero)) elif options.zero == 0: fft_len=len(usable_data)#2**N.int(N.ceil(N.log2(len(usable_data)*16))) else: fft_len=find_good_npoints(len(usable_data)) print "Finding good number of points for faster FFT: %i (was %i)"%(fft_len,len(usable_data)) print "Using only %.3f parts of signal"%(1.0/(float(len(usable_data))/fft_len)) def shannon(spectrum): # h = N.abs((spectrum.real[:-4]-8*spectrum.real[1:-3]+8*spectrum.real[3:-1]-2*spectrum.real[4:])/(12*dwell)) # second derivative of real part of spectrum h = N.abs(N.diff(spectrum.real,2)) h = h.compress(h>0) h/=h.sum() entrop = N.sum(-h*N.log(h)) return entrop def penalty(spectrum): r = spectrum.real r = r.compress(r<0) return N.dot(r,r) def entropy(phi, spectrum, gamma): """ Calculates the entropy of the spectrum (real part). p = phase TODO: gamma should be adjusted such that the penalty and entropy are in the same magnitude """ # x = N.linspace(0,1,len(spectrum)) Re = spectrum*N.exp(1j*phi) en_shannon = shannon(Re)+penalty(Re)*gamma return en_shannon def entropy_order2(phi, spectrum, gamma): """ Calculates the entropy of the spectrum (real part). phi = phase1, phase2 gamma should be adjusted such that the penalty and entropy are in the same magnitude """ # x = N.linspace(0,1,len(spectrum)) Re = spectrum*N.exp(1j* ( phi[0] + phi[1]*N.linspace(0,1,len(spectrum)))) en_shannon = shannon(Re) + penalty(Re)*gamma return en_shannon # windows from D. 
Traficante # signal enhancing def trafs_window(data, LW=10): n = len(data) t = dwell * N.arange(n) AT = t.max() E = N.exp(-t*N.pi*LW) e = N.exp((t-AT)*N.pi*LW) apod = E apod = (E**2*(E+e)/(E**3+e**3)) Ep = E[t>=(1/LW)] apod[t>=(1/LW)]=Ep if not options.batch: P.plot(apod) P.plot(usable_data.real/usable_data.real.max()) P.plot(apod*usable_data.real/(apod*usable_data).real.max()) P.legend() P.show() return data*apod # resolution enhancing def trafr_window(data, LW=10): n = len(data) t = dwell * N.arange(n) AT = t.max() E = N.exp(-t*N.pi*LW) e = N.exp((t-AT)*N.pi*LW) apod = E[:] apod[ x<1/LW ] = E**2/(E**3+e**3) if not options.batch: P.plot(apod) P.plot(usable_data.real/usable_data.real.max()) P.plot(apod*usable_data.real/(apod*usable_data).real.max()) P.legend() P.show() return apod*data def exp_window(data,LW): n = len(data) t = dwell * N.arange(n) apod = N.exp(-t*2*N.pi*LW) if not options.batch: P.plot(apod) P.plot(usable_data.real/usable_data.real.max(), label="Original") P.plot(apod*usable_data.real/(apod*usable_data).real.max(), label="Windowed") P.legend() P.show() return apod*data # The simple approach def phase(phi, signal_in): # signal is a part of the signal (imaginary) first_point = (signal_in[0]*N.exp(1j*phi)).imag #print first_point return first_point def simple_phase(signal_in): # using bisect or ridder also possible phi_correction = brentq(phase, -N.pi/2, N.pi/2, args=(signal_in)) return phi_correction print "Phase given:",options.phase if options.method == 'simple' and not options.phase: #phi = simple_phase(usable_data) phi = simple_phase(usable_data) elif options.method == 'maxent2' and not options.phase: # phasing with entropy # starting point x0 = [12,-100] fastft = FT.fftshift(FT.fft(usable_data, fft_len)) # Estimating gamma # print shannon(fastft),penalty(fastft) gamma = shannon(fastft)/penalty(fastft) print "Gamma estimmated to %.2e"%gamma phi = fmin(entropy_order2, x0, args=(fastft,gamma)) #phi = anneal(entropy, x0, args=(fastft,gamma,dwell), # lower = -N.pi/2, # upper = N.pi/2, # learn_rate = 0.9, # maxiter = 1000, # dwell = 100)[0] elif options.method == 'maxent' and not options.phase: # phasing with entropy # starting point x0 = simple_phase(usable_data) fastft = FT.fftshift(FT.fft(usable_data, fft_len)) # Estimating gamma # print shannon(fastft),penalty(fastft) gamma = shannon(fastft)/penalty(fastft) print "Gamma estimmated to %.2e"%gamma phi = fmin(entropy, x0, args=(fastft,gamma)) # phi = anneal(entropy, x0, args=(fastft,gamma,dwell), # lower = -N.pi/2, # upper = N.pi/2, # learn_rate = 0.9, # maxiter = 1000, # dwell = 100)[0] elif options.phase: phi = float(options.phase)*N.pi/180.0 try: x = N.linspace(0,1,len(usable_data)) # second order phase correction usable_data *= N.exp(1j*(phi[0] + x*phi[1])) except: # first order phase correction usable_data *= N.exp(1j*phi) options.phase = phi # Turn data by 180 if maximum < 0 if usable_data.real[0] < 0: usable_data*=N.exp(1j*N.pi) print "Phasing data (%s):"%(options.method),(phi/N.pi*180.0)%360.0 # Data windowing if float(options.lb) > 0: print "Windowing data", options.lb usable_data = exp_window(usable_data, float(options.lb)) print "FFT data ..." 
fastft = FT.fftshift(FT.fft(usable_data, fft_len)) freqs = FT.fftshift(FT.fftfreq(fft_len,dwell)) # baseline correction of the spectrum #print "Baseline correction of the spectrum" #base = N.mean([fastft[-64:].mean(),fastft[:64].mean()]) #fastft -= base if str(options.normalize_maximum) == 'True': mask_max = ( -280e3 < freqs ) & ( freqs < 280e3 ) print "Normalize to maximum intensity" fastft /= fastft.real[mask_max].max() if str(options.normalize_area) == 'True': print "Normalize to area" fastft /= fastft.real.sum() mask = N.ones(len(freqs), dtype='bool') if str(options.mask) == 'True': print "Spectrum from -280e3 to 280e3 kHz" mask = ( -280e3 < freqs ) & ( freqs < 280e3 ) if not options.batch: print "Trying to plot data ..." P.subplot(211) x = N.arange(len(usable_data))*dwell/1e-6 P.plot(x,usable_data.real,'r',label="Real") P.plot(x,usable_data.imag,'b',label="Imag") P.xlabel('t/us') P.ylabel('Signal/a.u.') P.legend() P.subplot(212) if str(options.mask) == 'True': P.plot(freqs[mask]/1e3, fastft.real[mask]) else: P.plot(freqs/1e3, fastft.real) null = (freqs == 0) P.plot(freqs[null], fastft.real[null], 'r.', ms=3) P.xlabel('Frequency/kHz') P.ylabel('Signal/a.u.') P.ylim(fastft.real.min() - 0.1*fastft.real.min(), fastft.real.max() + 0.05*fastft.real.max()) P.show() if options.parameterfilename: options.start = r_start # store start point explicitly print "Writing parameters to %s"%(options.parameterfilename) write_parameter_file(options.parameterfilename) if options.outfilename: print "Writing spectrum to %s"%(options.outfilename) out = open(options.outfilename,'w') out.write("# FFT spectrum from file %s\n"%(options.infilename)) if len(attributes.keys()) > 0: out.write("# %s\n"%(table_list[d])) field_length = 0 for key in attributes.keys(): if len(key) > field_length: field_length = len(key) for key in attributes.keys(): out.write('# %-*s %-*s\n'%(field_length,key,field_length,attributes[key])) out.write('#%9s %9s %9s\n'%("t","real","imag")) N.savetxt(out,N.array([freqs[mask],fastft.real[mask],fastft.imag[mask]]).T, fmt="%.4e") out.close() # save paramter file too parfile = os.path.splitext(options.outfilename)[0]+'.par' print "Writing parameters to %s"%(parfile) write_parameter_file(parfile) print "done!"
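As a quick illustration of the "simple" phasing option used above, the sketch below applies the same criterion as phase()/simple_phase(): brentq finds the phase angle that zeroes the imaginary part of the first FID point. The synthetic decaying signal and its 0.6 rad phase error are made up purely for demonstration.

def _simple_phase_sketch():
    """Illustrative only: the 'simple' phasing criterion on synthetic data."""
    import numpy as N
    from scipy.optimize import brentq

    def first_point_imag(phi, signal_in):
        # Imaginary part of the first point; it vanishes when the signal
        # is correctly phased.
        return (signal_in[0] * N.exp(1j * phi)).imag

    t = N.arange(1024) * 1e-6                          # 1 us dwell, arbitrary
    fid = N.exp(-t / 2e-4) * N.exp(2j * N.pi * 5e3 * t)
    fid = fid * N.exp(1j * 0.6)                        # known phase error

    phi = brentq(first_point_imag, -N.pi / 2, N.pi / 2, args=(fid,))
    # phi comes out close to -0.6, undoing the error applied above
    return fid * N.exp(1j * phi)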
import os from nose.plugins.skip import SkipTest if os.name == "nt": raise SkipTest("Skipped on Windows.") import sys import libmproxy.console.contentview as cv from libmproxy import utils, flow, encoding import tutils try: import pyamf except ImportError: pyamf = None try: import cssutils except: cssutils = None class TestContentView: def test_trailer(self): txt = [] cv.trailer(5, txt, 1000) assert not txt cv.trailer(cv.VIEW_CUTOFF + 10, txt, cv.VIEW_CUTOFF) assert txt def test_view_auto(self): v = cv.ViewAuto() f = v( flow.ODictCaseless(), "foo", 1000 ) assert f[0] == "Raw" f = v( flow.ODictCaseless( [["content-type", "text/html"]], ), "<html></html>", 1000 ) assert f[0] == "HTML" f = v( flow.ODictCaseless( [["content-type", "text/flibble"]], ), "foo", 1000 ) assert f[0] == "Raw" f = v( flow.ODictCaseless( [["content-type", "text/flibble"]], ), "<xml></xml>", 1000 ) assert f[0].startswith("XML") def test_view_urlencoded(self): d = utils.urlencode([("one", "two"), ("three", "four")]) v = cv.ViewURLEncoded() assert v([], d, 100) d = utils.urlencode([("adsfa", "")]) v = cv.ViewURLEncoded() assert v([], d, 100) def test_view_html(self): v = cv.ViewHTML() s = "<html><br><br></br><p>one</p></html>" assert v([], s, 1000) s = "gobbledygook" assert not v([], s, 1000) def test_view_html_outline(self): v = cv.ViewHTMLOutline() s = "<html><br><br></br><p>one</p></html>" assert v([], s, 1000) def test_view_json(self): cv.VIEW_CUTOFF = 100 v = cv.ViewJSON() assert v([], "{}", 1000) assert not v([], "{", 1000) assert v([], "[" + ",".join(["0"]*cv.VIEW_CUTOFF) + "]", 1000) assert v([], "[1, 2, 3, 4, 5]", 5) def test_view_xml(self): v = cv.ViewXML() assert v([], "<foo></foo>", 1000) assert not v([], "<foo>", 1000) s = """<?xml version="1.0" encoding="UTF-8"?> <?xml-stylesheet title="XSL_formatting"?> <rss xmlns:media="http://search.yahoo.com/mrss/" xmlns:atom="http://www.w3.org/2005/Atom" version="2.0"> </rss> """ assert v([], s, 1000) def test_view_raw(self): v = cv.ViewRaw() assert v([], "foo", 1000) def test_view_javascript(self): v = cv.ViewJavaScript() assert v([], "[1, 2, 3]", 100) assert v([], "[1, 2, 3", 100) assert v([], "function(a){[1, 2, 3]}", 100) def test_view_css(self): v = cv.ViewCSS() with open(tutils.test_data.path('data/1.css'), 'r') as fp: fixture_1 = fp.read() result = v([], 'a', 100) if cssutils: assert len(result[1]) == 0 else: assert len(result[1]) == 1 result = v([], fixture_1, 100) if cssutils: assert len(result[1]) > 1 else: assert len(result[1]) == 1 def test_view_hex(self): v = cv.ViewHex() assert v([], "foo", 1000) def test_view_image(self): v = cv.ViewImage() p = tutils.test_data.path("data/image.png") assert v([], file(p,"rb").read(), sys.maxint) p = tutils.test_data.path("data/image.gif") assert v([], file(p,"rb").read(), sys.maxint) p = tutils.test_data.path("data/image-err1.jpg") assert v([], file(p,"rb").read(), sys.maxint) p = tutils.test_data.path("data/image.ico") assert v([], file(p,"rb").read(), sys.maxint) assert not v([], "flibble", sys.maxint) def test_view_multipart(self): view = cv.ViewMultipart() v = """ --AaB03x Content-Disposition: form-data; name="submit-name" Larry --AaB03x """.strip() h = flow.ODictCaseless( [("Content-Type", "multipart/form-data; boundary=AaB03x")] ) assert view(h, v, 1000) h = flow.ODictCaseless() assert not view(h, v, 1000) h = flow.ODictCaseless( [("Content-Type", "multipart/form-data")] ) assert not view(h, v, 1000) h = flow.ODictCaseless( [("Content-Type", "unparseable")] ) assert not view(h, v, 1000) def 
test_get_content_view(self): r = cv.get_content_view( cv.get("Raw"), [["content-type", "application/json"]], "[1, 2, 3]", 1000, lambda x, l: None, False ) assert "Raw" in r[0] r = cv.get_content_view( cv.get("Auto"), [["content-type", "application/json"]], "[1, 2, 3]", 1000, lambda x, l: None, False ) assert r[0] == "JSON" r = cv.get_content_view( cv.get("Auto"), [["content-type", "application/json"]], "[1, 2", 1000, lambda x, l: None, False ) assert "Raw" in r[0] r = cv.get_content_view( cv.get("AMF"), [], "[1, 2", 1000, lambda x, l: None, False ) assert "Raw" in r[0] r = cv.get_content_view( cv.get("Auto"), [ ["content-type", "application/json"], ["content-encoding", "gzip"] ], encoding.encode('gzip', "[1, 2, 3]"), 1000, lambda x, l: None, False ) assert "decoded gzip" in r[0] assert "JSON" in r[0] r = cv.get_content_view( cv.get("XML"), [ ["content-type", "application/json"], ["content-encoding", "gzip"] ], encoding.encode('gzip', "[1, 2, 3]"), 1000, lambda x, l: None, False ) assert "decoded gzip" in r[0] assert "Raw" in r[0] if pyamf: def test_view_amf_request(): v = cv.ViewAMF() p = tutils.test_data.path("data/amf01") assert v([], file(p,"rb").read(), sys.maxint) p = tutils.test_data.path("data/amf02") assert v([], file(p,"rb").read(), sys.maxint) def test_view_amf_response(): v = cv.ViewAMF() p = tutils.test_data.path("data/amf03") assert v([], file(p,"rb").read(), sys.maxint) if cv.ViewProtobuf.is_available(): def test_view_protobuf_request(): v = cv.ViewProtobuf() p = tutils.test_data.path("data/protobuf01") content_type, output = v([], file(p,"rb").read(), sys.maxint) assert content_type == "Protobuf" assert output[0].text == '1: "3bbc333c-e61c-433b-819a-0b9a8cc103b8"' def test_get_by_shortcut(): assert cv.get_by_shortcut("h")
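The tests above call every content view through the same signature, view(headers, content, limit), and expect either a falsy result or a (description, chunks) pair whose first element names the view. A minimal custom view written against that convention is sketched below; the class, its attributes, and the return shape are inferred from these tests rather than from mitmproxy documentation.

import json

class ViewPrettyJSON(object):
    # hypothetical example view; only loosely mirrors the built-in views
    name = "PrettyJSON"

    def __call__(self, hdrs, content, limit):
        try:
            parsed = json.loads(content)
        except ValueError:
            return None                        # signal "could not parse", like ViewJSON above
        pretty = json.dumps(parsed, indent=2, sort_keys=True)
        return "PrettyJSON", pretty[:limit].splitlines()

# usage, matching the tests: result = ViewPrettyJSON()([], '{"a": 1}', 1000); result[0] == "PrettyJSON"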
from django.db.models import Q from django.contrib import messages from django.shortcuts import redirect from django.utils.translation import ugettext_lazy as _ from django.core.urlresolvers import reverse from django.views.generic import ListView, DetailView, DeleteView, \ UpdateView, FormView from django.views.generic.detail import SingleObjectMixin from oscar.apps.customer.utils import normalise_email from oscar.views.generic import BulkEditMixin from oscar.core.compat import get_user_model from oscar.core.loading import get_class, get_classes, get_model UserSearchForm, ProductAlertSearchForm, ProductAlertUpdateForm = get_classes( 'dashboard.users.forms', ('UserSearchForm', 'ProductAlertSearchForm', 'ProductAlertUpdateForm')) PasswordResetForm = get_class('customer.forms', 'PasswordResetForm') ProductAlert = get_model('customer', 'ProductAlert') User = get_user_model() class IndexView(BulkEditMixin, ListView): template_name = 'dashboard/users/index.html' paginate_by = 25 model = User actions = ('make_active', 'make_inactive', ) form_class = UserSearchForm desc_template = _('%(main_filter)s %(email_filter)s %(name_filter)s') description = '' context_object_name = 'user_list' def get_queryset(self): queryset = self.model.objects.all().order_by('-date_joined') self.desc_ctx = { 'main_filter': _('All users'), 'email_filter': '', 'name_filter': '', } if 'email' not in self.request.GET: self.form = self.form_class() return queryset self.form = self.form_class(self.request.GET) if not self.form.is_valid(): return queryset data = self.form.cleaned_data if data['email']: email = normalise_email(data['email']) queryset = queryset.filter(email__istartswith=email) self.desc_ctx['email_filter'] \ = _(" with email matching '%s'") % email if data['name']: # If the value is two words, then assume they are first name and # last name parts = data['name'].split() if len(parts) == 2: condition = Q(first_name__istartswith=parts[0]) \ | Q(last_name__istartswith=parts[1]) else: condition = Q(first_name__istartswith=data['name']) \ | Q(last_name__istartswith=data['name']) queryset = queryset.filter(condition).distinct() self.desc_ctx['name_filter'] \ = _(" with name matching '%s'") % data['name'] return queryset def get_context_data(self, **kwargs): context = super(IndexView, self).get_context_data(**kwargs) context['form'] = self.form context['queryset_description'] = self.desc_template % self.desc_ctx return context def make_inactive(self, request, users): return self._change_users_active_status(users, False) def make_active(self, request, users): return self._change_users_active_status(users, True) def _change_users_active_status(self, users, value): for user in users: if not user.is_superuser: user.is_active = value user.save() messages.info(self.request, _("Users' status successfully changed")) return redirect('dashboard:users-index') class UserDetailView(DetailView): template_name = 'dashboard/users/detail.html' model = User context_object_name = 'customer' class PasswordResetView(SingleObjectMixin, FormView): form_class = PasswordResetForm http_method_names = ['post'] model = User def post(self, request, *args, **kwargs): self.object = self.get_object() return super(PasswordResetView, self).post(request, *args, **kwargs) def get_form_kwargs(self): kwargs = super(PasswordResetView, self).get_form_kwargs() kwargs['data'] = {'email': self.object.email} return kwargs def form_valid(self, form): # The PasswordResetForm's save method sends the reset email form.save(request=self.request) return 
super(PasswordResetView, self).form_valid(form) def get_success_url(self): messages.success( self.request, _("A password reset email has been sent")) return reverse( 'dashboard:user-detail', kwargs={'pk': self.object.id} ) class ProductAlertListView(ListView): model = ProductAlert form_class = ProductAlertSearchForm context_object_name = 'alerts' template_name = 'dashboard/users/alerts/list.html' paginate_by = 20 base_description = _('All Alerts') description = '' def get_queryset(self): queryset = self.model.objects.all() self.description = self.base_description self.form = self.form_class(self.request.GET) if not self.form.is_valid(): return queryset data = self.form.cleaned_data if data['status']: queryset = queryset.filter(status=data['status']).distinct() self.description \ += _(" with status matching '%s'") % data['status'] if data['name']: # If the value is two words, then assume they are first name and # last name parts = data['name'].split() if len(parts) >= 2: queryset = queryset.filter( user__first_name__istartswith=parts[0], user__last_name__istartswith=parts[1] ).distinct() else: queryset = queryset.filter( Q(user__first_name__istartswith=parts[0]) | Q(user__last_name__istartswith=parts[-1]) ).distinct() self.description \ += _(" with customer name matching '%s'") % data['name'] if data['email']: queryset = queryset.filter( Q(user__email__icontains=data['email']) | Q(email__icontains=data['email']) ) self.description \ += _(" with customer email matching '%s'") % data['email'] return queryset def get_context_data(self, **kwargs): context = super(ProductAlertListView, self).get_context_data(**kwargs) context['form'] = self.form context['queryset_description'] = self.description return context class ProductAlertUpdateView(UpdateView): template_name = 'dashboard/users/alerts/update.html' model = ProductAlert form_class = ProductAlertUpdateForm context_object_name = 'alert' def get_success_url(self): messages.success(self.request, _("Product alert saved")) return reverse('dashboard:user-alert-list') class ProductAlertDeleteView(DeleteView): model = ProductAlert template_name = 'dashboard/users/alerts/delete.html' context_object_name = 'alert' def get_success_url(self): messages.warning(self.request, _("Product alert deleted")) return reverse('dashboard:user-alert-list')
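Both IndexView.get_queryset and ProductAlertListView.get_queryset split a two-word search term into first/last name parts, and otherwise OR the two name fields together with Q objects. The helper below is hypothetical and only illustrates that Q construction in isolation; it assumes Django is importable but needs no configured settings.

from django.db.models import Q

def build_name_condition(name):
    # two words are treated as "first last"; otherwise match either field
    parts = name.split()
    if len(parts) == 2:
        first, last = parts
        return Q(first_name__istartswith=first) | Q(last_name__istartswith=last)
    return Q(first_name__istartswith=name) | Q(last_name__istartswith=name)

# usage: queryset.filter(build_name_condition("john doe")).distinct()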
# Copyright (c) 2015 Ansible, Inc. # All Rights Reserved. # Python import datetime import logging from optparse import make_option # Django from django.core.management.base import NoArgsCommand, CommandError from django.db import transaction from django.utils.timezone import now # AWX from awx.main.models import ( Job, AdHocCommand, ProjectUpdate, InventoryUpdate, SystemJob, WorkflowJob, Notification ) from awx.main.signals import ( # noqa emit_update_inventory_on_created_or_deleted, emit_update_inventory_computed_fields, disable_activity_stream, disable_computed_fields ) from django.db.models.signals import post_save, post_delete, m2m_changed # noqa class Command(NoArgsCommand): ''' Management command to cleanup old jobs and project updates. ''' help = 'Remove old jobs, project and inventory updates from the database.' option_list = NoArgsCommand.option_list + ( make_option('--days', dest='days', type='int', default=90, metavar='N', help='Remove jobs/updates executed more than N days ago. Defaults to 90.'), make_option('--dry-run', dest='dry_run', action='store_true', default=False, help='Dry run mode (show items that would ' 'be removed)'), make_option('--jobs', dest='only_jobs', action='store_true', default=False, help='Remove jobs'), make_option('--ad-hoc-commands', dest='only_ad_hoc_commands', action='store_true', default=False, help='Remove ad hoc commands'), make_option('--project-updates', dest='only_project_updates', action='store_true', default=False, help='Remove project updates'), make_option('--inventory-updates', dest='only_inventory_updates', action='store_true', default=False, help='Remove inventory updates'), make_option('--management-jobs', default=False, action='store_true', dest='only_management_jobs', help='Remove management jobs'), make_option('--notifications', dest='only_notifications', action='store_true', default=False, help='Remove notifications'), make_option('--workflow-jobs', default=False, action='store_true', dest='only_workflow_jobs', help='Remove workflow jobs') ) def cleanup_jobs(self): #jobs_qs = Job.objects.exclude(status__in=('pending', 'running')) #jobs_qs = jobs_qs.filter(created__lte=self.cutoff) skipped, deleted = 0, 0 jobs = Job.objects.filter(created__lt=self.cutoff) for job in jobs.iterator(): job_display = '"%s" (%d host summaries, %d events)' % \ (unicode(job), job.job_host_summaries.count(), job.job_events.count()) if job.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s job %s', action_text, job.status, job_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, job_display) if not self.dry_run: job.delete() deleted += 1 skipped += Job.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def cleanup_ad_hoc_commands(self): skipped, deleted = 0, 0 ad_hoc_commands = AdHocCommand.objects.filter(created__lt=self.cutoff) for ad_hoc_command in ad_hoc_commands.iterator(): ad_hoc_command_display = '"%s" (%d events)' % \ (unicode(ad_hoc_command), ad_hoc_command.ad_hoc_command_events.count()) if ad_hoc_command.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s ad hoc command %s', action_text, ad_hoc_command.status, ad_hoc_command_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, ad_hoc_command_display) if not self.dry_run: 
ad_hoc_command.delete() deleted += 1 skipped += AdHocCommand.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def cleanup_project_updates(self): skipped, deleted = 0, 0 project_updates = ProjectUpdate.objects.filter(created__lt=self.cutoff) for pu in project_updates.iterator(): pu_display = '"%s" (type %s)' % (unicode(pu), unicode(pu.launch_type)) if pu.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s project update %s', action_text, pu.status, pu_display) skipped += 1 elif pu in (pu.project.current_update, pu.project.last_update) and pu.project.scm_type: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, pu_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, pu_display) if not self.dry_run: pu.delete() deleted += 1 skipped += ProjectUpdate.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def cleanup_inventory_updates(self): skipped, deleted = 0, 0 inventory_updates = InventoryUpdate.objects.filter(created__lt=self.cutoff) for iu in inventory_updates.iterator(): iu_display = '"%s" (source %s)' % (unicode(iu), unicode(iu.source)) if iu.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s inventory update %s', action_text, iu.status, iu_display) skipped += 1 elif iu in (iu.inventory_source.current_update, iu.inventory_source.last_update) and iu.inventory_source.source: action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s', action_text, iu_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, iu_display) if not self.dry_run: iu.delete() deleted += 1 skipped += InventoryUpdate.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def cleanup_management_jobs(self): skipped, deleted = 0, 0 system_jobs = SystemJob.objects.filter(created__lt=self.cutoff) for sj in system_jobs.iterator(): sj_display = '"%s" (type %s)' % (unicode(sj), unicode(sj.job_type)) if sj.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s system_job %s', action_text, sj.status, sj_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, sj_display) if not self.dry_run: sj.delete() deleted += 1 skipped += SystemJob.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def init_logging(self): log_levels = dict(enumerate([logging.ERROR, logging.INFO, logging.DEBUG, 0])) self.logger = logging.getLogger('awx.main.commands.cleanup_jobs') self.logger.setLevel(log_levels.get(self.verbosity, 0)) handler = logging.StreamHandler() handler.setFormatter(logging.Formatter('%(message)s')) self.logger.addHandler(handler) self.logger.propagate = False def cleanup_workflow_jobs(self): skipped, deleted = 0, 0 workflow_jobs = WorkflowJob.objects.filter(created__lt=self.cutoff) for workflow_job in workflow_jobs.iterator(): workflow_job_display = '"{}" ({} nodes)'.format( unicode(workflow_job), workflow_job.workflow_nodes.count()) if workflow_job.status in ('pending', 'waiting', 'running'): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s job %s', action_text, workflow_job.status, workflow_job_display) 
skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, workflow_job_display) if not self.dry_run: workflow_job.delete() deleted += 1 skipped += WorkflowJob.objects.filter(created__gte=self.cutoff).count() return skipped, deleted def cleanup_notifications(self): skipped, deleted = 0, 0 notifications = Notification.objects.filter(created__lt=self.cutoff) for notification in notifications.iterator(): notification_display = '"{}" (started {}, {} type, {} sent)'.format( unicode(notification), unicode(notification.created), notification.notification_type, notification.notifications_sent) if notification.status in ('pending',): action_text = 'would skip' if self.dry_run else 'skipping' self.logger.debug('%s %s notification %s', action_text, notification.status, notification_display) skipped += 1 else: action_text = 'would delete' if self.dry_run else 'deleting' self.logger.info('%s %s', action_text, notification_display) if not self.dry_run: notification.delete() deleted += 1 skipped += Notification.objects.filter(created__gte=self.cutoff).count() return skipped, deleted @transaction.atomic def handle_noargs(self, **options): self.verbosity = int(options.get('verbosity', 1)) self.init_logging() self.days = int(options.get('days', 90)) self.dry_run = bool(options.get('dry_run', False)) try: self.cutoff = now() - datetime.timedelta(days=self.days) except OverflowError: raise CommandError('--days specified is too large. Try something less than 99999 (about 270 years).') model_names = ('jobs', 'ad_hoc_commands', 'project_updates', 'inventory_updates', 'management_jobs', 'workflow_jobs', 'notifications') models_to_cleanup = set() for m in model_names: if options.get('only_%s' % m, False): models_to_cleanup.add(m) if not models_to_cleanup: models_to_cleanup.update(model_names) with disable_activity_stream(), disable_computed_fields(): for m in model_names: if m in models_to_cleanup: skipped, deleted = getattr(self, 'cleanup_%s' % m)() if self.dry_run: self.logger.log(99, '%s: %d would be deleted, %d would be skipped.', m.replace('_', ' '), deleted, skipped) else: self.logger.log(99, '%s: %d deleted, %d skipped.', m.replace('_', ' '), deleted, skipped)
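Every cleanup_* method above follows the same loop: look at records created before the cutoff, skip anything still pending/waiting/running, and delete only when not in dry-run mode. A stripped-down, framework-free version of that pattern is sketched below with hypothetical names, purely to make the shared structure explicit; it is not part of the AWX command.

def cleanup_records(records, cutoff, dry_run, logger,
                    active_statuses=('pending', 'waiting', 'running')):
    # records: iterable of objects exposing .created, .status and .delete()
    skipped, deleted = 0, 0
    for record in records:
        if record.created >= cutoff:
            skipped += 1
            continue
        if record.status in active_statuses:
            logger.debug('%s %s', 'would skip' if dry_run else 'skipping', record)
            skipped += 1
        else:
            logger.info('%s %s', 'would delete' if dry_run else 'deleting', record)
            if not dry_run:
                record.delete()
            deleted += 1
    return skipped, deleted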
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'mw_wav2dat.ui' # # Created by: PyQt5 UI code generator 5.10.1 # # WARNING! All changes made in this file will be lost! from qtpy import QtCore, QtGui, QtWidgets class Ui_MainWindow(object): def setupUi(self, MainWindow): MainWindow.setObjectName("MainWindow") MainWindow.resize(550, 450) MainWindow.setMinimumSize(QtCore.QSize(550, 450)) MainWindow.setMaximumSize(QtCore.QSize(553, 450)) self.centralwidget = QtWidgets.QWidget(MainWindow) self.centralwidget.setObjectName("centralwidget") self.gridLayout = QtWidgets.QGridLayout(self.centralwidget) self.gridLayout.setObjectName("gridLayout") self.label = QtWidgets.QLabel(self.centralwidget) self.label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label.setObjectName("label") self.gridLayout.addWidget(self.label, 0, 0, 1, 1) self.line_1 = QtWidgets.QFrame(self.centralwidget) self.line_1.setFrameShape(QtWidgets.QFrame.HLine) self.line_1.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_1.setObjectName("line_1") self.gridLayout.addWidget(self.line_1, 3, 0, 1, 5) self.progressBar = QtWidgets.QProgressBar(self.centralwidget) self.progressBar.setMinimumSize(QtCore.QSize(200, 0)) self.progressBar.setMaximumSize(QtCore.QSize(200, 16777215)) self.progressBar.setProperty("value", 0) self.progressBar.setObjectName("progressBar") self.gridLayout.addWidget(self.progressBar, 9, 0, 1, 1) self.label_10 = QtWidgets.QLabel(self.centralwidget) self.label_10.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_10.setObjectName("label_10") self.gridLayout.addWidget(self.label_10, 6, 0, 1, 1) self.groupBox_options = QtWidgets.QGroupBox(self.centralwidget) self.groupBox_options.setObjectName("groupBox_options") self.gridLayout_3 = QtWidgets.QGridLayout(self.groupBox_options) self.gridLayout_3.setObjectName("gridLayout_3") self.checkBox_information = QtWidgets.QCheckBox(self.groupBox_options) self.checkBox_information.setChecked(True) self.checkBox_information.setObjectName("checkBox_information") self.gridLayout_3.addWidget(self.checkBox_information, 0, 0, 1, 2) self.spinBox = QtWidgets.QSpinBox(self.groupBox_options) self.spinBox.setObjectName("spinBox") self.gridLayout_3.addWidget(self.spinBox, 0, 3, 1, 1) self.checkBox_same_name = QtWidgets.QCheckBox(self.groupBox_options) self.checkBox_same_name.setChecked(False) self.checkBox_same_name.setObjectName("checkBox_same_name") self.gridLayout_3.addWidget(self.checkBox_same_name, 1, 0, 1, 1) self.label_8 = QtWidgets.QLabel(self.groupBox_options) self.label_8.setObjectName("label_8") self.gridLayout_3.addWidget(self.label_8, 0, 2, 1, 1) self.gridLayout.addWidget(self.groupBox_options, 4, 0, 1, 5) self.line = QtWidgets.QFrame(self.centralwidget) self.line.setFrameShape(QtWidgets.QFrame.HLine) self.line.setFrameShadow(QtWidgets.QFrame.Sunken) self.line.setObjectName("line") self.gridLayout.addWidget(self.line, 8, 0, 1, 5) self.lineEdit_path_to_read = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_path_to_read.setMinimumSize(QtCore.QSize(300, 0)) self.lineEdit_path_to_read.setObjectName("lineEdit_path_to_read") self.gridLayout.addWidget(self.lineEdit_path_to_read, 0, 1, 1, 3) self.lineEdit_path_to_write = QtWidgets.QLineEdit(self.centralwidget) self.lineEdit_path_to_write.setObjectName("lineEdit_path_to_write") self.gridLayout.addWidget(self.lineEdit_path_to_write, 6, 1, 1, 3) self.line_2 = QtWidgets.QFrame(self.centralwidget) 
self.line_2.setFrameShape(QtWidgets.QFrame.HLine) self.line_2.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_2.setObjectName("line_2") self.gridLayout.addWidget(self.line_2, 1, 0, 1, 5) self.line_3 = QtWidgets.QFrame(self.centralwidget) self.line_3.setFrameShape(QtWidgets.QFrame.HLine) self.line_3.setFrameShadow(QtWidgets.QFrame.Sunken) self.line_3.setObjectName("line_3") self.gridLayout.addWidget(self.line_3, 5, 0, 1, 5) self.groupBox_information = QtWidgets.QGroupBox(self.centralwidget) self.groupBox_information.setObjectName("groupBox_information") self.gridLayout_2 = QtWidgets.QGridLayout(self.groupBox_information) self.gridLayout_2.setObjectName("gridLayout_2") self.label_5 = QtWidgets.QLabel(self.groupBox_information) self.label_5.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_5.setObjectName("label_5") self.gridLayout_2.addWidget(self.label_5, 5, 0, 1, 1) self.label_6 = QtWidgets.QLabel(self.groupBox_information) self.label_6.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_6.setObjectName("label_6") self.gridLayout_2.addWidget(self.label_6, 2, 0, 1, 1) self.label_2 = QtWidgets.QLabel(self.groupBox_information) self.label_2.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_2.setObjectName("label_2") self.gridLayout_2.addWidget(self.label_2, 1, 0, 1, 1) self.label_7 = QtWidgets.QLabel(self.groupBox_information) self.label_7.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_7.setObjectName("label_7") self.gridLayout_2.addWidget(self.label_7, 10, 0, 1, 1) self.label_3 = QtWidgets.QLabel(self.groupBox_information) self.label_3.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_3.setObjectName("label_3") self.gridLayout_2.addWidget(self.label_3, 4, 0, 1, 1) self.label_9 = QtWidgets.QLabel(self.groupBox_information) self.label_9.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_9.setObjectName("label_9") self.gridLayout_2.addWidget(self.label_9, 0, 0, 1, 1) self.label_file_name = QtWidgets.QLabel(self.groupBox_information) self.label_file_name.setObjectName("label_file_name") self.gridLayout_2.addWidget(self.label_file_name, 0, 1, 1, 1) self.label_size = QtWidgets.QLabel(self.groupBox_information) self.label_size.setObjectName("label_size") self.gridLayout_2.addWidget(self.label_size, 1, 1, 1, 1) self.label_number_of_channels = QtWidgets.QLabel(self.groupBox_information) self.label_number_of_channels.setObjectName("label_number_of_channels") self.gridLayout_2.addWidget(self.label_number_of_channels, 4, 1, 1, 1) self.label_compression = QtWidgets.QLabel(self.groupBox_information) self.label_compression.setObjectName("label_compression") self.gridLayout_2.addWidget(self.label_compression, 10, 1, 1, 1) self.label_number_of_frames = QtWidgets.QLabel(self.groupBox_information) self.label_number_of_frames.setObjectName("label_number_of_frames") self.gridLayout_2.addWidget(self.label_number_of_frames, 5, 1, 1, 1) self.label_sample_width = QtWidgets.QLabel(self.groupBox_information) self.label_sample_width.setObjectName("label_sample_width") self.gridLayout_2.addWidget(self.label_sample_width, 2, 1, 1, 1) self.label_4 = QtWidgets.QLabel(self.groupBox_information) self.label_4.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter) self.label_4.setObjectName("label_4") self.gridLayout_2.addWidget(self.label_4, 3, 
0, 1, 1) self.label_frame_rate = QtWidgets.QLabel(self.groupBox_information) self.label_frame_rate.setObjectName("label_frame_rate") self.gridLayout_2.addWidget(self.label_frame_rate, 3, 1, 1, 1) self.gridLayout.addWidget(self.groupBox_information, 2, 0, 1, 3) self.pushButton_convert = QtWidgets.QPushButton(self.centralwidget) self.pushButton_convert.setMinimumSize(QtCore.QSize(0, 0)) self.pushButton_convert.setMaximumSize(QtCore.QSize(16777215, 16777215)) self.pushButton_convert.setObjectName("pushButton_convert") self.gridLayout.addWidget(self.pushButton_convert, 9, 3, 1, 2) self.pushButton_path_to_read = QtWidgets.QPushButton(self.centralwidget) self.pushButton_path_to_read.setObjectName("pushButton_path_to_read") self.gridLayout.addWidget(self.pushButton_path_to_read, 0, 4, 1, 1) self.pushButton_path_to_write = QtWidgets.QPushButton(self.centralwidget) self.pushButton_path_to_write.setObjectName("pushButton_path_to_write") self.gridLayout.addWidget(self.pushButton_path_to_write, 6, 4, 1, 1) self.pushButton_close = QtWidgets.QPushButton(self.centralwidget) self.pushButton_close.setObjectName("pushButton_close") self.gridLayout.addWidget(self.pushButton_close, 9, 2, 1, 1) MainWindow.setCentralWidget(self.centralwidget) self.menubar = QtWidgets.QMenuBar(MainWindow) self.menubar.setGeometry(QtCore.QRect(0, 0, 550, 25)) self.menubar.setObjectName("menubar") self.menuFile = QtWidgets.QMenu(self.menubar) self.menuFile.setObjectName("menuFile") self.menuAbout = QtWidgets.QMenu(self.menubar) self.menuAbout.setObjectName("menuAbout") MainWindow.setMenuBar(self.menubar) self.actionOpen_WAV = QtWidgets.QAction(MainWindow) self.actionOpen_WAV.setObjectName("actionOpen_WAV") self.actionSave_DAT = QtWidgets.QAction(MainWindow) self.actionSave_DAT.setObjectName("actionSave_DAT") self.actionWAV2DAT = QtWidgets.QAction(MainWindow) self.actionWAV2DAT.setObjectName("actionWAV2DAT") self.actionWAV_Format = QtWidgets.QAction(MainWindow) self.actionWAV_Format.setObjectName("actionWAV_Format") self.actionDAT_Format = QtWidgets.QAction(MainWindow) self.actionDAT_Format.setObjectName("actionDAT_Format") self.actionAuthor = QtWidgets.QAction(MainWindow) self.actionAuthor.setObjectName("actionAuthor") self.menuFile.addAction(self.actionOpen_WAV) self.menuFile.addAction(self.actionSave_DAT) self.menuAbout.addAction(self.actionAuthor) self.menubar.addAction(self.menuFile.menuAction()) self.menubar.addAction(self.menuAbout.menuAction()) self.retranslateUi(MainWindow) QtCore.QMetaObject.connectSlotsByName(MainWindow) def retranslateUi(self, MainWindow): _translate = QtCore.QCoreApplication.translate MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow")) self.label.setText(_translate("MainWindow", "Load WAV File In:")) self.label_10.setText(_translate("MainWindow", "Save DAT File To:")) self.groupBox_options.setTitle(_translate("MainWindow", "Options")) self.checkBox_information.setText(_translate("MainWindow", "Export DAT with information")) self.checkBox_same_name.setText(_translate("MainWindow", "Use the same name")) self.label_8.setText(_translate("MainWindow", "New Sample Rate (pts/s)")) self.groupBox_information.setTitle(_translate("MainWindow", "Information")) self.label_5.setText(_translate("MainWindow", "Number of Frames (#):")) self.label_6.setText(_translate("MainWindow", "Sample Width (Bytes):")) self.label_2.setText(_translate("MainWindow", "Total Size (Bytes):")) self.label_7.setText(_translate("MainWindow", "Compression:")) self.label_3.setText(_translate("MainWindow", "Number of Channels 
(#):")) self.label_9.setText(_translate("MainWindow", "File Name:")) self.label_file_name.setText(_translate("MainWindow", "Unknown")) self.label_size.setText(_translate("MainWindow", "Unknown")) self.label_number_of_channels.setText(_translate("MainWindow", "Unknown")) self.label_compression.setText(_translate("MainWindow", "Unknown")) self.label_number_of_frames.setText(_translate("MainWindow", "Unknown")) self.label_sample_width.setText(_translate("MainWindow", "Unknown")) self.label_4.setText(_translate("MainWindow", "Sample Rate (Hz):")) self.label_frame_rate.setText(_translate("MainWindow", "Unknown")) self.pushButton_convert.setText(_translate("MainWindow", "Convert")) self.pushButton_path_to_read.setText(_translate("MainWindow", "Search")) self.pushButton_path_to_write.setText(_translate("MainWindow", "Search")) self.pushButton_close.setText(_translate("MainWindow", "Close")) self.menuFile.setTitle(_translate("MainWindow", "File")) self.menuAbout.setTitle(_translate("MainWindow", "About")) self.actionOpen_WAV.setText(_translate("MainWindow", "Load WAV")) self.actionSave_DAT.setText(_translate("MainWindow", "Convert to DAT")) self.actionWAV2DAT.setText(_translate("MainWindow", "WAV2DAT")) self.actionWAV_Format.setText(_translate("MainWindow", "WAV Format")) self.actionDAT_Format.setText(_translate("MainWindow", "DAT Format")) self.actionAuthor.setText(_translate("MainWindow", "Author")) import wavytool.images.rc_wavy_rc
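The generated Ui_MainWindow above only builds widgets; it has to be applied to a QMainWindow instance via setupUi() by a separate launcher. A minimal launcher is sketched below; the module name mw_wav2dat in the import is an assumption taken from the .ui filename in the header comment and may differ in the real package.

import sys
from qtpy import QtWidgets
from mw_wav2dat import Ui_MainWindow    # assumed module name, see note above

if __name__ == "__main__":
    app = QtWidgets.QApplication(sys.argv)
    window = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    ui.setupUi(window)                   # build the generated widgets onto the window
    window.show()
    sys.exit(app.exec_())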
from __future__ import absolute_import, print_function, division from abc import abstractmethod, ABCMeta import six from typing import List # noqa from mitmproxy import filt from mitmproxy import models # noqa @six.add_metaclass(ABCMeta) class FlowList(object): def __init__(self): self._list = [] # type: List[models.Flow] def __iter__(self): return iter(self._list) def __contains__(self, item): return item in self._list def __getitem__(self, item): return self._list[item] def __bool__(self): return bool(self._list) if six.PY2: __nonzero__ = __bool__ def __len__(self): return len(self._list) def index(self, f): return self._list.index(f) @abstractmethod def _add(self, f): return @abstractmethod def _update(self, f): return @abstractmethod def _remove(self, f): return def _pos(*args): return True class FlowView(FlowList): def __init__(self, store, filt=None): super(FlowView, self).__init__() if not filt: filt = _pos self._build(store, filt) self.store = store self.store.views.append(self) def _close(self): self.store.views.remove(self) def _build(self, flows, filt=None): if filt: self.filt = filt self._list = list(filter(self.filt, flows)) def _add(self, f): if self.filt(f): self._list.append(f) def _update(self, f): if f not in self._list: self._add(f) elif not self.filt(f): self._remove(f) def _remove(self, f): if f in self._list: self._list.remove(f) def _recalculate(self, flows): self._build(flows) class FlowStore(FlowList): """ Responsible for handling flows in the state: Keeps a list of all flows and provides views on them. """ def __init__(self): super(FlowStore, self).__init__() self._set = set() # Used for O(1) lookups self.views = [] self._recalculate_views() def get(self, flow_id): for f in self._list: if f.id == flow_id: return f def __contains__(self, f): return f in self._set def _add(self, f): """ Adds a flow to the state. The flow to add must not be present in the state. """ self._list.append(f) self._set.add(f) for view in self.views: view._add(f) def _update(self, f): """ Notifies the state that a flow has been updated. The flow must be present in the state. """ if f in self: for view in self.views: view._update(f) def _remove(self, f): """ Deletes a flow from the state. The flow must be present in the state. """ self._list.remove(f) self._set.remove(f) for view in self.views: view._remove(f) # Expensive bulk operations def _extend(self, flows): """ Adds a list of flows to the state. The list of flows to add must not contain flows that are already in the state. """ self._list.extend(flows) self._set.update(flows) self._recalculate_views() def _clear(self): self._list = [] self._set = set() self._recalculate_views() def _recalculate_views(self): """ Expensive operation: Recalculate all the views after a bulk change. """ for view in self.views: view._recalculate(self) # Utility functions. # There are some common cases where we need to argue about all flows # irrespective of filters on the view etc (i.e. on shutdown). def active_count(self): c = 0 for i in self._list: if not i.response and not i.error: c += 1 return c # TODO: Should accept_all operate on views or on all flows? 
def accept_all(self, master): for f in self._list: f.accept_intercept(master) def kill_all(self, master): for f in self._list: if not f.reply.acked: f.kill(master) class State(object): def __init__(self): self.flows = FlowStore() self.view = FlowView(self.flows, None) # These are compiled filt expressions: self.intercept = None @property def limit_txt(self): return getattr(self.view.filt, "pattern", None) def flow_count(self): return len(self.flows) # TODO: All functions regarding flows that don't cause side-effects should # be moved into FlowStore. def index(self, f): return self.flows.index(f) def active_flow_count(self): return self.flows.active_count() def add_flow(self, f): """ Add a request to the state. """ self.flows._add(f) return f def update_flow(self, f): """ Add a response to the state. """ self.flows._update(f) return f def delete_flow(self, f): self.flows._remove(f) def load_flows(self, flows): self.flows._extend(flows) def set_limit(self, txt): if txt == self.limit_txt: return if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.view._close() self.view = FlowView(self.flows, f) else: self.view._close() self.view = FlowView(self.flows, None) def set_intercept(self, txt): if txt: f = filt.parse(txt) if not f: return "Invalid filter expression." self.intercept = f else: self.intercept = None @property def intercept_txt(self): return getattr(self.intercept, "pattern", None) def clear(self): self.flows._clear() def accept_all(self, master): self.flows.accept_all(master) def backup(self, f): f.backup() self.update_flow(f) def revert(self, f): f.revert() self.update_flow(f) def killall(self, master): self.flows.kill_all(master)
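FlowStore keeps the canonical flow list and pushes every add/update/remove into its registered FlowView instances, so a filtered view never has to rescan the store. The toy demonstration below assumes it runs in the same module as the classes above and uses a plain stand-in object instead of a real mitmproxy flow.

class FakeFlow(object):
    # minimal stand-in with just the attributes the store/view touch here
    def __init__(self, host):
        self.id = host
        self.host = host
        self.response = None
        self.error = None

store = FlowStore()
view = FlowView(store, lambda f: f.host.endswith(".example.com"))

store._add(FakeFlow("api.example.com"))
store._add(FakeFlow("tracker.invalid"))

assert len(store) == 2              # the store holds every flow
assert len(view) == 1               # the filtered view only holds the matching one
assert store.active_count() == 2    # neither fake flow has a response or error yet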
# coding: utf-8 # # Copyright 2020 The Oppia Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Unit tests for expression_parser.py.""" from __future__ import annotations from core.domain import expression_parser from core.tests import test_utils class HelperFunctionsUnitTests(test_utils.GenericTestBase): """Test the 'contains_balanced_brackets' and 'is_algebraic' helper functions. """ def test_contains_balanced_brackets(self): """Tests for contains_balanced_brackets method.""" self.assertTrue(expression_parser.contains_balanced_brackets('')) self.assertTrue(expression_parser.contains_balanced_brackets('a+2')) self.assertTrue(expression_parser.contains_balanced_brackets('(a / 2)')) self.assertTrue(expression_parser.contains_balanced_brackets('[a/ 2]')) self.assertTrue(expression_parser.contains_balanced_brackets(' {a/2} ')) self.assertTrue(expression_parser.contains_balanced_brackets('([a]/2)')) self.assertTrue(expression_parser.contains_balanced_brackets( '[(a/{ 2 })]')) self.assertTrue(expression_parser.contains_balanced_brackets( '(([{}]{})( ){[ ]})')) self.assertTrue(expression_parser.contains_balanced_brackets( '[[ [((()))[[[[[]{}]]{}]]()]] ]')) self.assertTrue(expression_parser.contains_balanced_brackets( '{( 2x^2 ) ^ [ 3/2 ]} / 4')) self.assertFalse(expression_parser.contains_balanced_brackets('(a/2')) self.assertFalse(expression_parser.contains_balanced_brackets('a/2]')) self.assertFalse(expression_parser.contains_balanced_brackets('[)(]')) self.assertFalse(expression_parser.contains_balanced_brackets('{ [} ]')) self.assertFalse(expression_parser.contains_balanced_brackets(']]][[[')) self.assertFalse(expression_parser.contains_balanced_brackets(')({})')) self.assertFalse(expression_parser.contains_balanced_brackets('4/{0/]')) self.assertFalse(expression_parser.contains_balanced_brackets('(a/2]')) def test_is_algebraic(self): """Tests for is_algebraic method.""" self.assertTrue(expression_parser.is_algebraic('a^2.3')) self.assertTrue(expression_parser.is_algebraic('abs(alpha)')) self.assertTrue(expression_parser.is_algebraic('alpha/gamma')) self.assertTrue(expression_parser.is_algebraic('A + 2/3')) # The following tests might seem as invalid but the individual letters # will be joined via '*' during tokenization which makes them valid. 
self.assertTrue(expression_parser.is_algebraic('Alpha')) self.assertTrue(expression_parser.is_algebraic('invalid + 2')) self.assertTrue(expression_parser.is_algebraic('alpha + bet/22')) self.assertFalse(expression_parser.is_algebraic('1 + 2')) self.assertFalse(expression_parser.is_algebraic('1^2^3/4')) self.assertFalse(expression_parser.is_algebraic('1')) self.assertFalse(expression_parser.is_algebraic('sqrt(4/4)')) self.assertFalse(expression_parser.is_algebraic('tan(30)')) with self.assertRaisesRegex(Exception, 'Invalid bracket pairing.'): expression_parser.is_algebraic('1 +2)') with self.assertRaisesRegex(Exception, 'Invalid character: ~.'): expression_parser.is_algebraic('a~2') with self.assertRaisesRegex(Exception, 'Invalid character: !.'): expression_parser.is_algebraic('4! 2') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.is_algebraic('alpha + bet/22.3.4') def test_tokenize(self): """Tests for tokenize method.""" expression = 'a+b' expected_output = ['a', '+', 'b'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '53.4 - 6/alpha' expected_output = ['53.4', '-', '6', '/', 'alpha'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'a^0.5 + (-zeta)' expected_output = ['a', '^', '0.5', '+', '(', '-', 'zeta', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'sqrt(3/[-A])' expected_output = ['sqrt', '(', '3', '/', '(', '-', 'A', ')', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'abs(sqrt(3)) * 4/ 2^ 3 ' expected_output = [ 'abs', '(', 'sqrt', '(', '3', ')', ')', '*', '4', '/', '2', '^', '3'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '' expected_output = [] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '3.4^4.3/0.0005 * {9}' expected_output = ['3.4', '^', '4.3', '/', '0.0005', '*', '(', '9', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'ab' expected_output = ['a', '*', 'b'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'a**bc' expected_output = ['a', '*', '*', 'b', '*', 'c'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'Alpha' expected_output = ['A', '*', 'l', '*', 'p', '*', 'h', '*', 'a'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'alpha' expected_output = ['alpha'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'alphax' expected_output = ['alpha', '*', 'x'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'xalpha' expected_output = ['x', '*', 'alpha'] 
actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '2.2gamma/23' expected_output = ['2.2', '*', 'gamma', '/', '23'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '2pir^2/2' expected_output = ['2', '*', 'pi', '*', 'r', '^', '2', '/', '2'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'sigmaepsilon' expected_output = ['sigma', '*', 'epsilon'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'sqrt(epsilonpsi-2abeta)' expected_output = [ 'sqrt', '(', 'epsilon', '*', 'psi', '-', '2', '*', 'a', '*', 'beta', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'alphasqrt(3/4)' expected_output = ['alpha', '*', 'sqrt', '(', '3', '/', '4', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'tan(theta)cos(theta)' expected_output = [ 'tan', '(', 'theta', ')', '*', 'cos', '(', 'theta', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = '(a+b)(a-b)' expected_output = [ '(', 'a', '+', 'b', ')', '*', '(', 'a', '-', 'b', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'xsqrt(2)x' expected_output = [ 'x', '*', 'sqrt', '(', '2', ')', '*', 'x'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'sin(pi)(a - x^2alpha)' expected_output = [ 'sin', '(', 'pi', ')', '*', '(', 'a', '-', 'x', '^', '2', '*', 'alpha', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) expression = 'cosh(3a45theta) + sin(x(theta))' expected_output = [ 'cosh', '(', '3', '*', 'a', '*', '45', '*', 'theta', ')', '+', 'sin', '(', 'x', '*', '(', 'theta', ')', ')'] actual_output = map( lambda x: x.text, expression_parser.tokenize(expression)) self.assertEqual(list(actual_output), expected_output) with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('a.3') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('.3 - 2.4') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('1.2.3 + 4/2') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('a . . 
3') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('3..4') with self.assertRaisesRegex(Exception, 'Invalid token: ..'): expression_parser.tokenize('..5') def test_get_variables(self): """Tests for get_variables method.""" self.assertItemsEqual(expression_parser.get_variables('a^2.3'), ['a']) self.assertItemsEqual( expression_parser.get_variables('abs(alpha)'), ['alpha']) self.assertItemsEqual( expression_parser.get_variables('alpha/gamma'), ['alpha', 'gamma']) self.assertEqual(expression_parser.get_variables('A + 2/3'), ['A']) self.assertItemsEqual( expression_parser.get_variables('alphabetagamma'), ['alpha', 'beta', 'gamma']) self.assertItemsEqual( expression_parser.get_variables('betalphaa'), ['a', 'p', 'beta', 'l', 'h']) self.assertItemsEqual( expression_parser.get_variables('a+a*a/aa^a-a'), ['a']) self.assertItemsEqual(expression_parser.get_variables( 'sqrt(3+x^y)/abs(gamma)'), ['y', 'x', 'gamma']) self.assertItemsEqual( expression_parser.get_variables('a=3+4'), ['a']) self.assertItemsEqual(expression_parser.get_variables( '(a-2)^beta = alpha/gamma'), ['a', 'alpha', 'beta', 'gamma']) self.assertItemsEqual( expression_parser.get_variables('4=abs(-4)'), []) self.assertItemsEqual( expression_parser.get_variables('a^pi + e/2'), ['a', 'pi', 'e']) self.assertItemsEqual( expression_parser.get_variables('pi-3.14e'), ['pi', 'e']) self.assertItemsEqual( expression_parser.get_variables('epi'), ['pi', 'e']) class TokenUnitTests(test_utils.GenericTestBase): """Test the token module.""" def test_is_function(self): """Tests for is_function method.""" self.assertEqual(expression_parser.Token('sqrt').category, 'function') self.assertEqual(expression_parser.Token('abs').category, 'function') self.assertEqual(expression_parser.Token('tan').category, 'function') with self.assertRaisesRegex(Exception, 'Invalid token: tan().'): expression_parser.Token('tan()') with self.assertRaisesRegex(Exception, 'Invalid token: Sqrt.'): expression_parser.Token('Sqrt') def test_is_identifier(self): """Tests for is_identifier method.""" self.assertEqual(expression_parser.Token('a').category, 'identifier') self.assertEqual(expression_parser.Token('a').category, 'identifier') self.assertEqual( expression_parser.Token('alpha').category, 'identifier') self.assertEqual(expression_parser.Token('A').category, 'identifier') with self.assertRaisesRegex(Exception, 'Invalid token: al.'): expression_parser.Token('al') self.assertNotEqual( expression_parser.Token('5').category, 'identifier') def test_is_number(self): """Tests for is_number method.""" self.assertEqual(expression_parser.Token('1').category, 'number') self.assertEqual(expression_parser.Token('123').category, 'number') self.assertEqual(expression_parser.Token('12.34').category, 'number') self.assertEqual(expression_parser.Token('0.004').category, 'number') self.assertEqual(expression_parser.Token('pi').category, 'number') self.assertEqual(expression_parser.Token('e').category, 'number') with self.assertRaisesRegex(Exception, 'Invalid token: 8.4.3.'): expression_parser.Token('8.4.3') def test_is_operator(self): """Tests for is_operator method.""" self.assertEqual(expression_parser.Token('+').category, 'operator') self.assertEqual(expression_parser.Token('-').category, 'operator') self.assertEqual(expression_parser.Token('*').category, 'operator') self.assertEqual(expression_parser.Token('/').category, 'operator') self.assertEqual(expression_parser.Token('^').category, 'operator') 
self.assertEqual(expression_parser.Token('(').category, 'operator') self.assertEqual(expression_parser.Token(')').category, 'operator') class ParserUnitTests(test_utils.GenericTestBase): """Test the expression parser module.""" def test_parse(self): """Tests to check whether the following production rule is implemented correctly: <expr> ::= <mul_expr> (('+' | '-') <mul_expr>)* The parse tree for 'a + b - 2' should be built as follows: {-} / | {+} {2} / | {a} {b} """ root_node = expression_parser.Parser().parse('a + b - 2') # Root node {-}. self.assertIsInstance( root_node, expression_parser.SubtractionOperatorNode) self.assertEqual(len(root_node.children), 2) left_child_1, right_child_1 = root_node.children # Left child 1 {+}. self.assertIsInstance( left_child_1, expression_parser.AdditionOperatorNode) self.assertEqual(len(left_child_1.children), 2) # Right child 1 {2}. self.assertIsInstance(right_child_1, expression_parser.NumberNode) self.assertEqual(right_child_1.token.text, '2') self.assertEqual(len(right_child_1.children), 0) left_child_2, right_child_2 = left_child_1.children # Left child 2 {a}. self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {b}. self.assertIsInstance(right_child_2, expression_parser.IdentifierNode) self.assertEqual(right_child_2.token.text, 'b') self.assertEqual(len(right_child_2.children), 0) def test_parse_mul_expr(self): """Tests to check whether the following production rule is implemented correctly: <mul_expr> ::= <pow_expr> (('*' | '/') <pow_expr>)* The parse tree for 'a / b * 2' should be built as follows: {*} / | {/} {2} / | {a} {b} """ root_node = expression_parser.Parser().parse('a / b * 2') # Root node {*}. self.assertIsInstance( root_node, expression_parser.MultiplicationOperatorNode) self.assertEqual(len(root_node.children), 2) left_child_1, right_child_1 = root_node.children # Left child 1 {/}. self.assertIsInstance( left_child_1, expression_parser.DivisionOperatorNode) self.assertEqual(len(left_child_1.children), 2) # Right child 1 {2}. self.assertIsInstance(right_child_1, expression_parser.NumberNode) self.assertEqual(right_child_1.token.text, '2') self.assertEqual(len(right_child_1.children), 0) left_child_2, right_child_2 = left_child_1.children # Left child 2 {a}. self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {b}. self.assertIsInstance(right_child_2, expression_parser.IdentifierNode) self.assertEqual(right_child_2.token.text, 'b') self.assertEqual(len(right_child_2.children), 0) def test_parse_pow_expr(self): """Tests to check whether the following production rule is implemented correctly: <pow_expr> ::= '-' <pow_expr> | '+' <pow_expr> | <unit> ('^' <pow_expr>)? The parse tree for 'a ^ b ^ 2' should be built as follows: {^} / | {a} {^} / | {b} {2} """ root_node = expression_parser.Parser().parse('a ^ b ^ 2') # Root node {^}. self.assertIsInstance(root_node, expression_parser.PowerOperatorNode) self.assertEqual(len(root_node.children), 2) left_child_1, right_child_1 = root_node.children # Left child 1 {a}. self.assertIsInstance(left_child_1, expression_parser.IdentifierNode) self.assertEqual(left_child_1.token.text, 'a') self.assertEqual(len(left_child_1.children), 0) # Right child 1 {^}. 
self.assertIsInstance( right_child_1, expression_parser.PowerOperatorNode) self.assertEqual(len(right_child_1.children), 2) left_child_2, right_child_2 = right_child_1.children # Left child 2 {b}. self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'b') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {2}. self.assertIsInstance(right_child_2, expression_parser.NumberNode) self.assertEqual(right_child_2.token.text, '2') self.assertEqual(len(right_child_2.children), 0) def test_parse_unit(self): """Tests to check whether the following production rule is implemented correctly: <unit> ::= <identifier> | <number> | '(' <expr> ')' | <function> '(' <expr> ')' The parse tree for 'sqrt(a*2)' should be built as follows: {sqrt} | {*} / | {a} {2} """ root_node = expression_parser.Parser().parse('sqrt(a*2)') # Root node {sqrt}. self.assertIsInstance(root_node, expression_parser.UnaryFunctionNode) self.assertEqual(len(root_node.children), 1) child_1 = root_node.children[0] # Child 1 {*}. self.assertIsInstance( child_1, expression_parser.MultiplicationOperatorNode) self.assertEqual(len(child_1.children), 2) left_child_2, right_child_2 = child_1.children # Left child 2 {a}. self.assertIsInstance(left_child_2, expression_parser.IdentifierNode) self.assertEqual(left_child_2.token.text, 'a') self.assertEqual(len(left_child_2.children), 0) # Right child 2 {2}. self.assertIsInstance(right_child_2, expression_parser.NumberNode) self.assertEqual(right_child_2.token.text, '2') self.assertEqual(len(right_child_2.children), 0) def test_validates_math_expression(self): """Tests whether the parser can validate math expressions.""" self.assertTrue(expression_parser.is_valid_expression('a+b')) self.assertTrue(expression_parser.is_valid_expression('a+(-b)')) self.assertTrue(expression_parser.is_valid_expression('-a+b')) self.assertTrue(expression_parser.is_valid_expression('a+b^(-2)')) self.assertTrue(expression_parser.is_valid_expression('a+b/2.3')) self.assertTrue(expression_parser.is_valid_expression('ab/2')) self.assertTrue(expression_parser.is_valid_expression('a(b+c)')) self.assertTrue(expression_parser.is_valid_expression('2x + 3/2')) self.assertTrue(expression_parser.is_valid_expression('alpha + bet/2')) self.assertTrue(expression_parser.is_valid_expression('Alpha/2')) self.assertTrue(expression_parser.is_valid_expression( '42 - [5/a] (4)')) self.assertTrue(expression_parser.is_valid_expression( 'a + sqrt(beta/gamma)')) self.assertTrue(expression_parser.is_valid_expression( 'cos(theta/2^epsilon)')) self.assertTrue(expression_parser.is_valid_expression('a+{-b/22}')) self.assertTrue(expression_parser.is_valid_expression('abs(a^2 + b^2)')) self.assertTrue(expression_parser.is_valid_expression( 'sin(theta)^2 + cos(theta)^2')) self.assertTrue(expression_parser.is_valid_expression('(2*pi*r^2)/2')) self.assertTrue(expression_parser.is_valid_expression('1 + (2*a)')) self.assertTrue(expression_parser.is_valid_expression('(a+ b) ')) self.assertTrue(expression_parser.is_valid_expression( '{a+(beta - gamma)}')) self.assertTrue(expression_parser.is_valid_expression( '(a) / ((b)/(c))')) self.assertTrue(expression_parser.is_valid_expression( '{a+(b-[c])-(beta^4)}')) self.assertTrue(expression_parser.is_valid_expression('alpha + (-3)')) self.assertTrue(expression_parser.is_valid_expression( 'alpha^(3.9/beta*gamma)')) self.assertTrue(expression_parser.is_valid_expression( '{a-(-3)/(2-(-b)^4)}^2')) self.assertTrue(expression_parser.is_valid_expression( 
'a+(-3)/alpha + gamma^2')) self.assertTrue(expression_parser.is_valid_expression('(x+y) * (x-y)')) self.assertTrue(expression_parser.is_valid_expression( '(a+ b)^2 - (c+d) ^ 3')) self.assertTrue(expression_parser.is_valid_expression('3+2')) self.assertTrue(expression_parser.is_valid_expression('---+34')) self.assertTrue(expression_parser.is_valid_expression('---(3/+4)')) self.assertTrue(expression_parser.is_valid_expression('3+2^3')) self.assertTrue(expression_parser.is_valid_expression('(5-2^[6+3])')) self.assertTrue(expression_parser.is_valid_expression('(-5)^(-1)/2')) self.assertTrue(expression_parser.is_valid_expression( '2*10^3 + 3*10^2')) self.assertTrue(expression_parser.is_valid_expression( '{55 - 2/(-3)^100 + [5-4]}')) self.assertTrue(expression_parser.is_valid_expression('(3^2) - (4^2)')) self.assertTrue(expression_parser.is_valid_expression( '(1+2+3)/(1-2-3)')) self.assertTrue(expression_parser.is_valid_expression( '24.6 + 3^(-1/2)')) self.assertTrue(expression_parser.is_valid_expression('1^1^1^1^1^1^1')) self.assertTrue(expression_parser.is_valid_expression( '1000 + 200 + 30 + 4')) self.assertTrue(expression_parser.is_valid_expression('(1.01)^39')) self.assertTrue(expression_parser.is_valid_expression('506/(2-3)^(-3)')) self.assertTrue(expression_parser.is_valid_expression('sqrt(-1)')) self.assertTrue(expression_parser.is_valid_expression( 'sqrt(-abs(-1))^2/abs(5)')) self.assertFalse(expression_parser.is_valid_expression('a+b/')) self.assertFalse(expression_parser.is_valid_expression('|x|')) self.assertFalse(expression_parser.is_valid_expression('||')) self.assertFalse(expression_parser.is_valid_expression('|x+y|-z')) self.assertFalse(expression_parser.is_valid_expression('a^2.')) self.assertFalse(expression_parser.is_valid_expression('(352+)-3*x')) self.assertFalse(expression_parser.is_valid_expression('(a-2^34-)')) self.assertFalse(expression_parser.is_valid_expression( '(25 + 3.4.3*a)')) self.assertFalse(expression_parser.is_valid_expression('sqrt(abs)')) self.assertFalse(expression_parser.is_valid_expression( 'alpha + bet/2.3.4')) self.assertFalse(expression_parser.is_valid_expression('a_b')) self.assertFalse(expression_parser.is_valid_expression('!/')) self.assertFalse(expression_parser.is_valid_expression('a~b')) self.assertFalse(expression_parser.is_valid_expression('a*b)')) self.assertFalse(expression_parser.is_valid_expression('(a}+{b)')) self.assertFalse(expression_parser.is_valid_expression('{a+b)(c}')) self.assertFalse(expression_parser.is_valid_expression('a**b')) self.assertFalse(expression_parser.is_valid_expression('(a)^/(b)')) self.assertFalse(expression_parser.is_valid_expression('a+/3')) self.assertFalse(expression_parser.is_valid_expression('a=b')) self.assertFalse(expression_parser.is_valid_expression('a<b')) self.assertFalse(expression_parser.is_valid_expression('a>b')) self.assertFalse(expression_parser.is_valid_expression('a<=b')) self.assertFalse(expression_parser.is_valid_expression('a>=b')) self.assertFalse(expression_parser.is_valid_expression('3+2/*a')) self.assertFalse(expression_parser.is_valid_expression('192.168.1 + 3')) self.assertFalse(expression_parser.is_valid_expression('{1 - 2 (/3}')) self.assertFalse(expression_parser.is_valid_expression('[5^(3-2])')) self.assertFalse(expression_parser.is_valid_expression( '55.02//3.5-(-a)')) self.assertFalse(expression_parser.is_valid_expression( 'alpha + beta-^1')) self.assertFalse(expression_parser.is_valid_expression('(3+2]')) self.assertFalse(expression_parser.is_valid_expression('3!2')) 
self.assertFalse(expression_parser.is_valid_expression('3~2')) self.assertFalse(expression_parser.is_valid_expression('3-/2')) self.assertFalse(expression_parser.is_valid_expression('3-5=(-2)')) self.assertFalse(expression_parser.is_valid_expression('3 > 2'))
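# Illustrative sketch (not part of the original test module): the assertions
# above walk parse trees by hand through each node's `children` attribute. A
# small recursive helper makes that structure explicit; for 'sqrt(a*2)', the
# tree checked in test_parse_unit has four nodes ({sqrt}, {*}, {a}, {2}).
def _count_parse_tree_nodes(node):
    """Count the nodes of a tree built by expression_parser.Parser().parse()."""
    return 1 + sum(_count_parse_tree_nodes(child) for child in node.children)

# Example (hypothetical usage, same expression_parser module as the tests):
#   _count_parse_tree_nodes(expression_parser.Parser().parse('sqrt(a*2)')) == 4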
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo.utils import excutils from testtools import testcase from sahara.tests.integration.configs import config as cfg from sahara.tests.integration.tests import cinder from sahara.tests.integration.tests import edp from sahara.tests.integration.tests import map_reduce from sahara.tests.integration.tests import scaling from sahara.tests.integration.tests import swift from sahara.utils import edp as utils_edp class MaprGatingTest(cinder.CinderVolumeTest, edp.EDPTest, map_reduce.MapReduceTest, swift.SwiftTest, scaling.ScalingTest): config = cfg.ITConfig().mapr_config SKIP_CINDER_TEST = config.SKIP_CINDER_TEST SKIP_EDP_TEST = config.SKIP_EDP_TEST SKIP_MAP_REDUCE_TEST = config.SKIP_MAP_REDUCE_TEST SKIP_SWIFT_TEST = config.SKIP_SWIFT_TEST SKIP_SCALING_TEST = config.SKIP_SCALING_TEST @testcase.skipIf(config.SKIP_ALL_TESTS_FOR_PLUGIN, 'All tests for MAPR plugin were skipped') @testcase.attr('mapr1') def test_mapr_plugin_gating(self): self.mapr_config.IMAGE_ID, self.mapr_config.SSH_USERNAME = ( self.get_image_id_and_ssh_username(self.mapr_config)) # Default value of self.common_config.FLOATING_IP_POOL is None floating_ip_pool = self.common_config.FLOATING_IP_POOL internal_neutron_net = None # If Neutron enabled then get ID of floating IP pool and ID of internal # Neutron network if self.common_config.NEUTRON_ENABLED: floating_ip_pool = self.get_floating_ip_pool_id_for_neutron_net() internal_neutron_net = self.get_internal_neutron_net_id() if not self.mapr_config.SKIP_CINDER_TEST: volumes_per_node = 2 else: volumes_per_node = 0 node_group_template_id_list = [] # ------------------------------CLUSTER CREATION------------------------------- # ----------------------"tt-dn" node group template creation------------------- try: node_group_template_tt_dn_id = self.create_node_group_template( name='test-node-group-template-mapr-tt-dn', plugin_config=self.mapr_config, description='test node group template for MAPR plugin', volumes_per_node=volumes_per_node, node_processes=self.mapr_config.WORKER_NODE_PROCESSES, # NEED CREATE WORKER_NODE_PROCESSES node_configs={}, floating_ip_pool=floating_ip_pool ) node_group_template_id_list.append(node_group_template_tt_dn_id) except Exception as e: with excutils.save_and_reraise_exception(): message = ('Failure while \'tt-dn\' node group ' 'template creation: ') self.print_error_log(message, e) # --------------------------Cluster template creation-------------------------- try: cluster_template_id = self.create_cluster_template( name='test-cluster-template-mapr', plugin_config=self.mapr_config, description='test cluster template for MAPR plugin', cluster_configs={}, node_groups=[ dict( name='master-node-jt-nn', flavor_id=self.flavor_id, node_processes=self.mapr_config.MASTER_NODE_PROCESSES, # NEED CREATE MASTER_NODE_PROCESSES node_configs={}, floating_ip_pool=floating_ip_pool, count=1), dict( name='worker-node-tt-dn', node_group_template_id=node_group_template_tt_dn_id, count=3) ], 
net_id=internal_neutron_net ) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( node_group_template_id_list=node_group_template_id_list ) message = 'Failure while cluster template creation: ' self.print_error_log(message, e) # ------------------------------Cluster creation------------------------------- cluster_name = (self.common_config.CLUSTER_NAME + '-' + self.mapr_config.PLUGIN_NAME) try: self.create_cluster( name=cluster_name, plugin_config=self.mapr_config, cluster_template_id=cluster_template_id, description='test cluster', cluster_configs={} ) cluster_info = self.get_cluster_info(self.mapr_config) self.await_active_tasktracker( cluster_info['node_info'], self.mapr_config) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( self.cluster_id, cluster_template_id, node_group_template_id_list ) message = 'Failure while cluster creation: ' self.print_error_log(message, e) # --------------------------------CINDER TESTING------------------------------- try: self.cinder_volume_testing(cluster_info) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = 'Failure while Cinder testing: ' self.print_error_log(message, e) # ---------------------------------EDP TESTING--------------------------------- path = 'sahara/tests/integration/tests/resources/' pig_job_data = open(path + 'edp-job.pig').read() pig_lib_data = open(path + 'edp-lib.jar').read() mapreduce_jar_data = open(path + 'edp-mapreduce.jar').read() # This is a modified version of WordCount that takes swift configs java_lib_data = open(path + 'edp-java/edp-java.jar').read() java_configs = { "configs": { "edp.java.main_class": ("org.openstack.sahara.examples" ".WordCount") } } mapreduce_configs = { "configs": { "mapred.mapper.class": "org.apache.oozie.example.SampleMapper", "mapred.reducer.class": ("org.apache.oozie.example" ".SampleReducer") } } mapreduce_streaming_configs = { "configs": { "edp.streaming.mapper": "/bin/cat", "edp.streaming.reducer": "/usr/bin/wc" } } try: self.edp_testing(job_type=utils_edp.JOB_TYPE_PIG, job_data_list=[{'pig': pig_job_data}], lib_data_list=[{'jar': pig_lib_data}], swift_binaries=True, hdfs_local_output=True) self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE, job_data_list=[], lib_data_list=[{'jar': mapreduce_jar_data}], configs=mapreduce_configs, swift_binaries=True, hdfs_local_output=True) self.edp_testing(job_type=utils_edp.JOB_TYPE_MAPREDUCE_STREAMING, job_data_list=[], lib_data_list=[], configs=mapreduce_streaming_configs) self.edp_testing(job_type=utils_edp.JOB_TYPE_JAVA, job_data_list=[], lib_data_list=[{'jar': java_lib_data}], configs=java_configs, pass_input_output_args=True) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = 'Failure while EDP testing: ' self.print_error_log(message, e) # -----------------------------MAP REDUCE TESTING------------------------------ try: self.map_reduce_testing(cluster_info) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = 'Failure while Map Reduce testing: ' self.print_error_log(message, e) # --------------------------CHECK SWIFT AVAILABILITY--------------------------- try: self.check_swift_availability(cluster_info) 
except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = 'Failure during check of Swift availability: ' self.print_error_log(message, e) # -------------------------------CLUSTER SCALING------------------------------- if not self.mapr_config.SKIP_SCALING_TEST: datanode_count_after_resizing = ( cluster_info['node_info']['datanode_count'] + self.mapr_config.SCALE_EXISTING_NG_COUNT) change_list = [ { 'operation': 'resize', 'info': ['worker-node-tt-dn', datanode_count_after_resizing] }, { 'operation': 'add', 'info': [ 'new-worker-node-tt-dn', self.mapr_config.SCALE_NEW_NG_COUNT, '%s' % node_group_template_tt_dn_id ] } ] try: new_cluster_info = self.cluster_scaling(cluster_info, change_list) self.await_active_tasktracker( new_cluster_info['node_info'], self.mapr_config) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = 'Failure while cluster scaling: ' self.print_error_log(message, e) # -------------------------CINDER TESTING AFTER SCALING------------------------ try: self.cinder_volume_testing(new_cluster_info) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( new_cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = ('Failure while Cinder testing after cluster ' 'scaling: ') self.print_error_log(message, e) # ----------------------MAP REDUCE TESTING AFTER SCALING----------------------- try: self.map_reduce_testing(new_cluster_info) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( new_cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = ('Failure while Map Reduce testing after ' 'cluster scaling: ') self.print_error_log(message, e) # -------------------CHECK SWIFT AVAILABILITY AFTER SCALING-------------------- try: self.check_swift_availability(new_cluster_info) except Exception as e: with excutils.save_and_reraise_exception(): self.delete_objects( new_cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list ) message = ('Failure during check of Swift availability ' 'after cluster scaling: ') self.print_error_log(message, e) # ---------------------------DELETE CREATED OBJECTS---------------------------- self.delete_objects( cluster_info['cluster_id'], cluster_template_id, node_group_template_id_list )
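# Illustrative note (not part of the original test): every stage above repeats
# the same cleanup idiom, so a failure re-raises only after the objects created
# so far have been deleted. Distilled, the pattern is:
#
#     try:
#         run_stage()
#     except Exception as e:
#         with excutils.save_and_reraise_exception():
#             self.delete_objects(cluster_info['cluster_id'],
#                                 cluster_template_id,
#                                 node_group_template_id_list)
#             self.print_error_log('Failure while <stage>: ', e)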
import os import time import socket from resource import getrusage, RUSAGE_SELF from twisted.application.service import Service from twisted.internet.task import LoopingCall from carbon.conf import settings stats = {} prior_stats = {} HOSTNAME = socket.gethostname().replace('.', '_') PAGESIZE = os.sysconf('SC_PAGESIZE') rusage = getrusage(RUSAGE_SELF) lastUsage = rusage.ru_utime + rusage.ru_stime lastUsageTime = time.time() # NOTE: Referencing settings in this *top level scope* will # give you *defaults* only. Probably not what you wanted. # TODO(chrismd) refactor the graphite metrics hierarchy to be cleaner, # more consistent, and make room for frontend metrics. #metric_prefix = "Graphite.backend.%(program)s.%(instance)s." % settings def increment(stat, increase=1): try: stats[stat] += increase except KeyError: stats[stat] = increase def max(stat, newval): try: if stats[stat] < newval: stats[stat] = newval except KeyError: stats[stat] = newval def append(stat, value): try: stats[stat].append(value) except KeyError: stats[stat] = [value] def getCpuUsage(): global lastUsage, lastUsageTime rusage = getrusage(RUSAGE_SELF) currentUsage = rusage.ru_utime + rusage.ru_stime currentTime = time.time() usageDiff = currentUsage - lastUsage timeDiff = currentTime - lastUsageTime if timeDiff == 0: # shouldn't be possible, but I've actually seen a ZeroDivisionError from this timeDiff = 0.000001 cpuUsagePercent = (usageDiff / timeDiff) * 100.0 lastUsage = currentUsage lastUsageTime = currentTime return cpuUsagePercent def getMemUsage(): rss_pages = int(open('/proc/self/statm').read().split()[1]) return rss_pages * PAGESIZE def recordMetrics(): global lastUsage global prior_stats myStats = stats.copy() myPriorStats = {} stats.clear() # cache metrics if settings.program == 'carbon-cache': record = cache_record updateTimes = myStats.get('updateTimes', []) committedPoints = myStats.get('committedPoints', 0) creates = myStats.get('creates', 0) droppedCreates = myStats.get('droppedCreates', 0) errors = myStats.get('errors', 0) cacheQueries = myStats.get('cacheQueries', 0) cacheBulkQueries = myStats.get('cacheBulkQueries', 0) cacheOverflow = myStats.get('cache.overflow', 0) cacheBulkQuerySizes = myStats.get('cacheBulkQuerySize', []) # Calculate cache-data-structure-derived metrics prior to storing anything # in the cache itself -- which would otherwise affect said metrics. 
cache_size = cache.MetricCache().size cache_queues = len(cache.MetricCache()) record('cache.size', cache_size) record('cache.queues', cache_queues) if updateTimes: avgUpdateTime = sum(updateTimes) / len(updateTimes) record('avgUpdateTime', avgUpdateTime) if committedPoints: pointsPerUpdate = float(committedPoints) / len(updateTimes) record('pointsPerUpdate', pointsPerUpdate) if cacheBulkQuerySizes: avgBulkSize = sum(cacheBulkQuerySizes) / len(cacheBulkQuerySizes) record('cache.bulk_queries_average_size', avgBulkSize) record('updateOperations', len(updateTimes)) record('committedPoints', committedPoints) record('creates', creates) record('droppedCreates', droppedCreates) record('errors', errors) record('cache.queries', cacheQueries) record('cache.bulk_queries', cacheBulkQueries) record('cache.overflow', cacheOverflow) # aggregator metrics elif settings.program == 'carbon-aggregator': record = aggregator_record record('allocatedBuffers', len(BufferManager)) record('bufferedDatapoints', sum([b.size for b in BufferManager.buffers.values()])) record('aggregateDatapointsSent', myStats.get('aggregateDatapointsSent', 0)) # relay metrics else: record = relay_record # shared relay stats for relays & aggregators if settings.program in ['carbon-aggregator', 'carbon-relay']: prefix = 'destinations.' relay_stats = [(k,v) for (k,v) in myStats.items() if k.startswith(prefix)] for stat_name, stat_value in relay_stats: record(stat_name, stat_value) # Preserve the count of sent metrics so that the ratio of # received : sent can be checked per-relay to determine the # health of the destination. if stat_name.endswith('.sent'): myPriorStats[stat_name] = stat_value # common metrics record('activeConnections', len(state.connectedMetricReceiverProtocols)) record('metricsReceived', myStats.get('metricsReceived', 0)) record('blacklistMatches', myStats.get('blacklistMatches', 0)) record('whitelistRejects', myStats.get('whitelistRejects', 0)) record('cpuUsage', getCpuUsage()) # And here preserve count of messages received in the prior periiod myPriorStats['metricsReceived'] = myStats.get('metricsReceived', 0) prior_stats.clear() prior_stats.update(myPriorStats) try: # This only works on Linux record('memUsage', getMemUsage()) except Exception: pass def cache_record(metric, value): prefix = settings.CARBON_METRIC_PREFIX if settings.instance is None: fullMetric = '%s.agents.%s.%s' % (prefix, HOSTNAME, metric) else: fullMetric = '%s.agents.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric) datapoint = (time.time(), value) cache.MetricCache().store(fullMetric, datapoint) def relay_record(metric, value): prefix = settings.CARBON_METRIC_PREFIX if settings.instance is None: fullMetric = '%s.relays.%s.%s' % (prefix, HOSTNAME, metric) else: fullMetric = '%s.relays.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric) datapoint = (time.time(), value) events.metricGenerated(fullMetric, datapoint) def aggregator_record(metric, value): prefix = settings.CARBON_METRIC_PREFIX if settings.instance is None: fullMetric = '%s.aggregator.%s.%s' % (prefix, HOSTNAME, metric) else: fullMetric = '%s.aggregator.%s-%s.%s' % (prefix, HOSTNAME, settings.instance, metric) datapoint = (time.time(), value) events.metricGenerated(fullMetric, datapoint) class InstrumentationService(Service): def __init__(self): self.record_task = LoopingCall(recordMetrics) def startService(self): if settings.CARBON_METRIC_INTERVAL > 0: self.record_task.start(settings.CARBON_METRIC_INTERVAL, False) Service.startService(self) def stopService(self): if 
settings.CARBON_METRIC_INTERVAL > 0: self.record_task.stop() Service.stopService(self) # Avoid import circularities from carbon import state, events, cache from carbon.aggregator.buffers import BufferManager
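# Illustrative usage sketch (not part of the original module): other carbon
# components update the module-level `stats` dict through the helpers above,
# and recordMetrics() drains it every CARBON_METRIC_INTERVAL seconds via the
# LoopingCall owned by InstrumentationService.
def _example_instrumentation_usage():
    increment('metricsReceived')   # bump a simple counter
    append('updateTimes', 0.012)   # collect samples that recordMetrics() averages
    max('cache.overflow', 42)      # track a high-water mark (module-level max, not builtins.max)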
"""Figure 5 - Goal zone enrichment by place cells""" FIG_FORMAT = 'svg' circ_var_pcs = False MALES_ONLY = False import matplotlib as mpl if FIG_FORMAT == 'svg': mpl.use('agg') elif FIG_FORMAT == 'pdf': mpl.use('pdf') elif FIG_FORMAT == 'interactive': mpl.use('TkAgg') import matplotlib.pyplot as plt import numpy as np import seaborn.apionly as sns import lab import lab.analysis.place_cell_analysis as place import lab.analysis.reward_analysis as ra import lab.misc as misc import lab.plotting as plotting import Df16a_analysis as df from Fig1_GOL_task_performance import day_number_only_label, label_conditions # Colors WT_color = df.WT_color Df_color = df.Df_color colors = (WT_color, Df_color) # ROI filters WT_filter = df.WT_filter Df_filter = df.Df_filter roi_filters = (WT_filter, Df_filter) markers = df.markers linestyles = df.linestyles save_dir = df.fig_save_dir filename = 'Fig5_goal_enrichment{}.{}'.format( '_males' if MALES_ONLY else '', FIG_FORMAT) def fix_heatmap_ax(ax, expt_grp): plotting.right_label( ax, 'cell index', rotation=270, ha='center', va='center') ax.set_xlabel('Distance from reward (fraction of belt)') ax.tick_params( which='both', bottom=True, left=False, labelleft=False, labelbottom=True, top=False, right=True, direction='out') ax.yaxis.tick_right() n_cells = int(ax.get_ylim()[0] + 0.05) ax.set_yticks(ax.get_ylim()) ax.set_yticklabels([str(n_cells), '1']) xlim = ax.get_xlim() ax.set_xticks([xlim[0], np.mean(xlim), xlim[1]]) ax.set_xticklabels(['-0.5', '0.0', '0.5']) for spine in ax.spines.itervalues(): spine.set_linewidth(1) # Figure out the reward window width windows = [] for expt in expt_grp: track_length = expt[0].behaviorData()['trackLength'] window = float(expt.get('operantSpatialWindow')) windows.append(window / track_length) ax.plot([0.5, 0.5], [0, 1], transform=ax.transAxes, color='k', ls='--') ax.plot([np.mean(windows) + 0.5, np.mean(windows) + 0.5], [0, 1], transform=ax.transAxes, color='k', ls='--') ax.set_xlim(xlim) def main(): all_grps = df.loadExptGrps('GOL') WT_expt_grp = all_grps['WT_place_set'] Df_expt_grp = all_grps['Df_place_set'] expt_grps = [WT_expt_grp, Df_expt_grp] if MALES_ONLY: for expt_grp in expt_grps: expt_grp.filter(lambda expt: expt.parent.get('sex') == 'M') WT_label = WT_expt_grp.label() Df_label = Df_expt_grp.label() fig = plt.figure(figsize=(8.5, 11)) gs1 = plt.GridSpec( 2, 5, left=0.1, right=0.3, top=0.90, bottom=0.67, hspace=0.2) gs1_2 = plt.GridSpec( 2, 5, left=0.3, right=0.5, top=0.90, bottom=0.67, hspace=0.2) WT_1_heatmap_ax = fig.add_subplot(gs1[0, :-1]) WT_3_heatmap_ax = fig.add_subplot(gs1_2[0, :-1]) Df_1_heatmap_ax = fig.add_subplot(gs1[1, :-1]) Df_3_heatmap_ax = fig.add_subplot(gs1_2[1, :-1]) gs_cbar = plt.GridSpec( 2, 10, left=0.3, right=0.5, top=0.90, bottom=0.67, hspace=0.2) WT_colorbar_ax = fig.add_subplot(gs_cbar[0, -1]) Df_colorbar_ax = fig.add_subplot(gs_cbar[1, -1]) gs2 = plt.GridSpec(1, 10, left=0.1, right=0.5, top=0.6, bottom=0.45) pf_close_fraction_ax = fig.add_subplot(gs2[0, :4]) pf_close_behav_corr_ax = fig.add_subplot(gs2[0, 5:]) frac_near_range_2 = (-0.051, 0.551) behav_range_2 = (-0.051, 0.551) # # Heatmaps # WT_cmap = sns.light_palette(WT_color, as_cmap=True) WT_dataframe = lab.ExperimentGroup.dataframe( WT_expt_grp, include_columns=['X_condition', 'X_day', 'X_session']) WT_1_expt_grp = WT_expt_grp.subGroup(list( WT_dataframe[ (WT_dataframe['X_condition'] == 'C') & (WT_dataframe['X_day'] == '0') & (WT_dataframe['X_session'] == '0')]['expt'])) place.plotPositionHeatmap( WT_1_expt_grp, roi_filter=WT_filter, 
ax=WT_1_heatmap_ax, norm='individual', cbar_visible=False, cmap=WT_cmap, plotting_order='place_cells_only', show_belt=False, reward_in_middle=True) fix_heatmap_ax(WT_1_heatmap_ax, WT_1_expt_grp) WT_1_heatmap_ax.set_title(r'Condition $\mathrm{III}$: Day 1') WT_1_heatmap_ax.set_ylabel(WT_label) WT_1_heatmap_ax.set_xlabel('') WT_3_expt_grp = WT_expt_grp.subGroup(list( WT_dataframe[ (WT_dataframe['X_condition'] == 'C') & (WT_dataframe['X_day'] == '2') & (WT_dataframe['X_session'] == '0')]['expt'])) place.plotPositionHeatmap( WT_3_expt_grp, roi_filter=WT_filter, ax=WT_3_heatmap_ax, norm='individual', cbar_visible=True, cax=WT_colorbar_ax, cmap=WT_cmap, plotting_order='place_cells_only', show_belt=False, reward_in_middle=True) fix_heatmap_ax(WT_3_heatmap_ax, WT_3_expt_grp) WT_3_heatmap_ax.set_title(r'Condition $\mathrm{III}$: Day 3') WT_3_heatmap_ax.set_ylabel('') WT_3_heatmap_ax.set_xlabel('') WT_colorbar_ax.set_yticklabels(['Min', 'Max']) Df_cmap = sns.light_palette(Df_color, as_cmap=True) Df_dataframe = lab.ExperimentGroup.dataframe( Df_expt_grp, include_columns=['X_condition', 'X_day', 'X_session']) Df_1_expt_grp = Df_expt_grp.subGroup(list( Df_dataframe[ (Df_dataframe['X_condition'] == 'C') & (Df_dataframe['X_day'] == '0') & (Df_dataframe['X_session'] == '2')]['expt'])) place.plotPositionHeatmap( Df_1_expt_grp, roi_filter=Df_filter, ax=Df_1_heatmap_ax, norm='individual', cbar_visible=False, cmap=Df_cmap, plotting_order='place_cells_only', show_belt=False, reward_in_middle=True) fix_heatmap_ax(Df_1_heatmap_ax, Df_1_expt_grp) Df_1_heatmap_ax.set_ylabel(Df_label) Df_3_expt_grp = Df_expt_grp.subGroup(list( Df_dataframe[ (Df_dataframe['X_condition'] == 'C') & (Df_dataframe['X_day'] == '2') & (Df_dataframe['X_session'] == '0')]['expt'])) place.plotPositionHeatmap( Df_3_expt_grp, roi_filter=Df_filter, ax=Df_3_heatmap_ax, norm='individual', cbar_visible=True, cax=Df_colorbar_ax, cmap=Df_cmap, plotting_order='place_cells_only', show_belt=False, reward_in_middle=True) fix_heatmap_ax(Df_3_heatmap_ax, Df_3_expt_grp) Df_3_heatmap_ax.set_ylabel('') Df_colorbar_ax.set_yticklabels(['Min', 'Max']) # # Fraction of PCs near reward # activity_metric = place.centroid_to_position_threshold activity_kwargs = {'method': 'resultant_vector', 'positions': 'reward', 'pcs_only': True, 'threshold': np.pi / 8} behavior_fn = ra.fraction_licks_in_reward_zone behavior_kwargs = {} behavior_label = 'Fraction of licks in reward zone' plotting.plot_metric( pf_close_fraction_ax, expt_grps, metric_fn=activity_metric, roi_filters=roi_filters, groupby=[['expt', 'X_condition', 'X_day']], plotby=['X_condition', 'X_day'], plot_abs=False, plot_method='line', activity_kwargs=activity_kwargs, rotate_labels=False, activity_label='Fraction of place cells near reward', label_every_n=1, colors=colors, markers=markers, markersize=5, return_full_dataframes=False, linestyles=linestyles) pf_close_fraction_ax.axhline(1 / 8., linestyle='--', color='k') pf_close_fraction_ax.set_title('') sns.despine(ax=pf_close_fraction_ax) pf_close_fraction_ax.set_xlabel('Day in Condition') day_number_only_label(pf_close_fraction_ax) label_conditions(pf_close_fraction_ax) pf_close_fraction_ax.legend(loc='upper left', fontsize=6) pf_close_fraction_ax.set_ylim(0, 0.40) pf_close_fraction_ax.set_yticks([0, 0.1, 0.2, 0.3, 0.4]) scatter_kws = {'s': 5} colorby_list = [(expt_grp.label(), 'C') for expt_grp in expt_grps] pf_close_behav_corr_ax.set_xlim(frac_near_range_2) pf_close_behav_corr_ax.set_ylim(behav_range_2) plotting.plot_paired_metrics( expt_grps, 
first_metric_fn=place.centroid_to_position_threshold, second_metric_fn=behavior_fn, roi_filters=roi_filters, groupby=(('expt',),), colorby=('expt_grp', 'X_condition'), filter_fn=lambda df: df['X_condition'] == 'C', filter_columns=['X_condition'], first_metric_kwargs=activity_kwargs, second_metric_kwargs=behavior_kwargs, first_metric_label='Fraction of place cells near reward', second_metric_label=behavior_label, shuffle_colors=False, fit_reg=True, plot_method='regplot', colorby_list=colorby_list, colors=colors, markers=markers, ax=pf_close_behav_corr_ax, scatter_kws=scatter_kws, truncate=False, linestyles=linestyles) pf_close_behav_corr_ax.set_xlim(frac_near_range_2) pf_close_behav_corr_ax.set_ylim(behav_range_2) pf_close_behav_corr_ax.tick_params(direction='in') pf_close_behav_corr_ax.get_legend().set_visible(False) pf_close_behav_corr_ax.legend(loc='upper left', fontsize=6) misc.save_figure(fig, filename, save_dir=save_dir) plt.close('all') if __name__ == '__main__': main()
import csv from urllib.request import Request, urlopen import dateutil.parser import re from os import system from sys import argv from bs4 import BeautifulSoup from datetime import date import scrape_util default_sale, base_url, prefix = scrape_util.get_market(argv) default_sale = default_sale[0] report_path = 'market-report.php' temp_raw = scrape_util.ReportRaw(argv, prefix) sale_pattern = [ re.compile( r'(?P<name>[^,]+),' r'(?P<city>[^\d,]+),?\s+' r'(?P<head>\d+)\s*' r'(?P<cattle>.+?)[\s_]{2,}' r'(?P<weight>[\d,\.]*)\s+' r'\$(?P<price>[\d,\.]+)\s*' r'(?P<price_type>/Hd|/Cwt)?', re.IGNORECASE ), re.compile( r'(?P<name>.+?)\s{2,}' r'(?P<city>)' r'(?P<head>\d+)\s+' r'(?P<cattle>.+?)\s{2,}' r'(?P<weight>[\d,\.]*)\s+' r'\$(?P<price>[\d,\.]+)\s*' r'(?P<price_type>/Hd|/Cwt)?', re.IGNORECASE ), re.compile( r'(?P<name>[^,]+),' r'(?P<city>.+?)\s{2,}' r'(?P<head>)' r'(?P<cattle>.+?)\s{2,}' r'(?P<weight>[\d,\.]*)\s+' r'\$(?P<price>[\d,\.]+)\s*' r'(?P<price_type>/Hd|/Cwt)?', re.IGNORECASE ), ] not_cattle_pattern = re.compile(r'goat|hog|ewe|buck|lamb|kid|sow|mare', re.IGNORECASE) head_pattern = re.compile(r'([,\d]+)\s+he?a?d', re.IGNORECASE) def get_sale_head(line): """Return the total number of head sold at the sale. If present, the number is usually at the top of the market report.""" for this_line in line: match = head_pattern.search(this_line) if match: return match.group(1).replace(',','') def get_sale_date(this_report): """Return the date of the sale.""" date_string = this_report.get_text().replace('.pdf', '') sale_date = dateutil.parser.parse(date_string, fuzzy=True).date() if sale_date > date.today(): sale_date = None return sale_date def is_sale(this_line): """Determine whether a given line describes a sale of cattle.""" is_not_succinct = len(this_line.split()) > 3 has_price = '$' in this_line return has_price and is_not_succinct def get_sale(line): """Convert the input into a dictionary, with keys matching the CSV column headers in the scrape_util module. 
""" for p in sale_pattern: match = p.search(line) if match: break if not_cattle_pattern.search(match.group('cattle')): return {} sale = { 'consignor_name': match.group('name'), 'consignor_city': match.group('city'), 'cattle_head': match.group('head'), 'cattle_cattle': match.group('cattle'), 'cattle_avg_weight': match.group('weight').replace(',', '').replace('.', ''), } price = match.group('price').replace(',', '') if match.group('price_type') == '/Hd': sale['cattle_price'] = price else: sale['cattle_price_cwt'] = price sale = {k: v.strip() for k, v in sale.items() if v.strip()} return sale def write_sale(line, this_default_sale, writer): """Extract sales from a list of report lines and write them to a CSV file.""" for this_line in line: if is_sale(this_line): sale = this_default_sale.copy() sale.update(get_sale(this_line)) if sale != this_default_sale: writer.writerow(sale) def main(): # Collect individual reports into a list request = Request( base_url + report_path, headers = scrape_util.url_header, ) with urlopen(request) as io: soup = BeautifulSoup(io.read(), 'lxml') content = soup.find('div', id = 'content') report = content.find_all('a') # Locate existing CSV files archive = scrape_util.ArchiveFolder(argv, prefix) # Write a CSV file for each report not in the archive for this_report in report: if 'horse' in this_report.get_text().lower(): continue sale_date = get_sale_date(this_report) io_name = archive.new_csv(sale_date) # Stop iteration if this report is already archived if not io_name: continue # Initialize the default sale dictionary this_default_sale = default_sale.copy() this_default_sale.update({ 'sale_year': sale_date.year, 'sale_month': sale_date.month, 'sale_day': sale_date.day, }) # create temporary text file from downloaded pdf pdf_url = base_url + this_report['href'] request = Request( pdf_url, headers = scrape_util.url_header, ) with urlopen(request) as io: response = io.read() with temp_raw.open('wb') as io: io.write(response) system(scrape_util.pdftotext.format(str(temp_raw))) # read sale text into line list temp_txt = temp_raw.with_suffix('.txt') with temp_txt.open('r') as io: original_line = [this_line.strip() for this_line in io.readlines() if this_line.strip()] if not original_line: temp_img = temp_raw.with_suffix('.tiff') system(scrape_util.convert.format("-density 400x400", str(temp_raw), str(temp_img))) system(scrape_util.tesseract.format("-c preserve_interword_spaces=1", str(temp_img), str(temp_txt.with_suffix('')))) with temp_txt.open('r') as io: original_line = [this_line.strip() for this_line in io.readlines() if this_line.strip()] temp_raw.clean() # # Default split index set at 120 to handle Jan 22, 2015 report with one column of sale # split_index = 120 # # Look for line with two sales and the index to split the line into two columns # for this_line in original_line: # if re.search(r'([0-9,]+\.[0-9]{2}).+?([0-9,]+\.[0-9]{2})', this_line): # match = re.search(r'(/cwt|/he?a?d?)', this_line, re.IGNORECASE) # if match: # split_index = this_line.find(match.group(1)) + len(match.group()) # break # column1 = list(this_line[0:split_index].strip() for this_line in original_line) # column2 = list(this_line[split_index+1:].strip() for this_line in original_line) # line = column1 + column2 line = list(filter(bool, original_line)) if not line: continue sale_head = get_sale_head(line) this_default_sale['sale_head'] = sale_head # Open a new CSV file and write each sale with io_name.open('w', encoding='utf-8') as io: writer = csv.DictWriter(io, scrape_util.header, 
lineterminator='\n') writer.writeheader() write_sale(line, this_default_sale, writer) if __name__ == '__main__': main()
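# Illustrative sketch (not part of the original script): how one report line
# flows through is_sale() and get_sale(). The sample line is hypothetical and
# is shaped to match the first regex in sale_pattern (consignor name, city,
# head count, cattle description, weight, then a $ price with /Hd or /Cwt).
def _demo_parse_line():
    line = 'Smith Ranch, Belle Fourche  10 blk strs  750  $165.00 /Cwt'
    if is_sale(line):
        # Expected keys: consignor_name, consignor_city, cattle_head,
        # cattle_cattle, cattle_avg_weight and cattle_price_cwt.
        return get_sale(line)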
import pytest from samplics.sampling import SampleSize, allocate region = ["Dakar", "Kaolack", "Ziguinchor"] pop_size = {"Dakar": 500, "Kaolack": 300, "Ziguinchor": 200} @pytest.mark.xfail(reason="stratum is required") def testation_equal_stratum_error(): allocate(method="equal") def testation_equal(): sizes, rates = allocate(method="equal", stratum=region, pop_size=pop_size, constant=15) assert sizes["Dakar"] == 15 assert sizes["Kaolack"] == 15 assert sizes["Ziguinchor"] == 15 assert rates["Dakar"] == 15 / pop_size["Dakar"] assert rates["Kaolack"] == 15 / pop_size["Kaolack"] assert rates["Ziguinchor"] == 15 / pop_size["Ziguinchor"] assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_equal_error(): with pytest.raises(ValueError): allocate(method="equal", stratum=region, pop_size=pop_size, constant=[23]) def testation_proportional(): sizes, rates = allocate( method="proportional", stratum=region, samp_size=100, pop_size=pop_size ) assert sizes["Dakar"] == 50 assert sizes["Kaolack"] == 30 assert sizes["Ziguinchor"] == 20 assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_proportional_error(): with pytest.raises(ValueError): allocate(method="equal", stratum=region, samp_size=100, pop_size=5) def testation_fixed_rate_number(): sizes, rates = allocate(method="fixed_rate", stratum=region, pop_size=pop_size, rate=0.05) assert sizes["Dakar"] == 25 assert sizes["Kaolack"] == 15 assert sizes["Ziguinchor"] == 10 assert rates["Dakar"] == 0.05 assert rates["Kaolack"] == 0.05 assert rates["Ziguinchor"] == 0.05 assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_fixed_rate_error(): with pytest.raises(ValueError): allocate(method="fixed_rate", stratum=region, pop_size=[5]) def testation_variable_rate(): input_rates = {"Dakar": 0.05, "Kaolack": 0.10, "Ziguinchor": 0.20} sizes, rates = allocate( method="variable_rate", stratum=region, pop_size=pop_size, rate=input_rates ) assert sizes["Dakar"] == 25 assert sizes["Kaolack"] == 30 assert sizes["Ziguinchor"] == 40 assert rates["Dakar"] == 0.05 assert rates["Kaolack"] == 0.10 assert rates["Ziguinchor"] == 0.20 assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testationf_variable_rate_error(): with pytest.raises(ValueError): allocate(method="variable_rate", stratum=region, pop_size=[5]) def testation_proportional_rate(): pop_size2 = {"Dakar": 5000, "Kaolack": 3000, "Ziguinchor": 2000} sizes, rates = allocate( method="proportional_rate", stratum=region, pop_size=pop_size2, rate=0.000005 ) assert sizes["Dakar"] == 125 assert sizes["Kaolack"] == 45 assert sizes["Ziguinchor"] == 20 assert rates["Dakar"] == pytest.approx(0.000005 * pop_size2["Dakar"]) assert rates["Kaolack"] == pytest.approx(0.000005 * pop_size2["Kaolack"]) assert rates["Ziguinchor"] == pytest.approx(0.000005 * pop_size2["Ziguinchor"]) assert rates["Dakar"] == sizes["Dakar"] / pop_size2["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size2["Kaolack"] assert 
rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size2["Ziguinchor"] def testation_proportional_rate_error1(): rate = {"Dakar": 0.005, "Kaolack": 0.010, "Ziguinchor": 0.020} with pytest.raises(ValueError): allocate(method="proportional_rate", stratum=region, pop_size=pop_size, rate=rate) def test_deff_proportional_rate_error2(): with pytest.raises(ValueError): allocate(method="fixed_rate", stratum=region, pop_size=[5]) def test_deff_optimum_mean(): stddev = {"Dakar": 5, "Kaolack": 10, "Ziguinchor": 20} sizes, rates = allocate( method="optimum_mean", stratum=region, pop_size=pop_size, rate=0.01, stddev=stddev ) assert sizes["Dakar"] == 25 assert sizes["Kaolack"] == 30 assert sizes["Ziguinchor"] == 40 assert rates["Dakar"] == 0.01 * stddev["Dakar"] assert rates["Kaolack"] == 0.01 * stddev["Kaolack"] assert rates["Ziguinchor"] == 0.01 * stddev["Ziguinchor"] assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_optimum_mean_error1(): rate = {"Dakar": 0.005, "Kaolack": 0.010, "Ziguinchor": 0.020} stddev = {"Dakar": 5, "Kaolack": 10, "Ziguinchor": 20} with pytest.raises(ValueError): allocate( method="optimum_mean", stratum=region, pop_size=pop_size, rate=rate, stddev=stddev ) def testation_optimum_mean_error2(): with pytest.raises(ValueError): allocate(method="optimum_mean", stratum=region, pop_size=pop_size, stddev=[5]) def test_deff_optimum_comparison(): stddev = {"Dakar": 50, "Kaolack": 10, "Ziguinchor": 20} sizes, rates = allocate( method="optimum_comparison", stratum=region, pop_size=pop_size, rate=0.5, stddev=stddev ) assert sizes["Dakar"] == 25 assert sizes["Kaolack"] == 5 assert sizes["Ziguinchor"] == 10 assert rates["Dakar"] == 0.5 * stddev["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == 0.5 * stddev["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == 0.5 * stddev["Ziguinchor"] / pop_size["Ziguinchor"] assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_optimum_comparison_error1(): rate = {"Dakar": 0.005, "Kaolack": 0.010, "Ziguinchor": 0.020} stddev = {"Dakar": 5, "Kaolack": 10, "Ziguinchor": 20} with pytest.raises(ValueError): allocate( method="optimum_comparison", stratum=region, pop_size=pop_size, rate=rate, stddev=stddev, ) def testation_equal_errors_error2(): with pytest.raises(ValueError): allocate(method="equal_errors", stratum=region, pop_size=pop_size, stddev=[5]) def test_deff_equal_errors(): stddev = {"Dakar": 5, "Kaolack": 1, "Ziguinchor": 2} sizes, rates = allocate( method="equal_errors", stratum=region, pop_size=pop_size, constant=5, stddev=stddev ) assert sizes["Dakar"] == 125 assert sizes["Kaolack"] == 5 assert sizes["Ziguinchor"] == 20 assert rates["Dakar"] == 5 * stddev["Dakar"] * stddev["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == 5 * stddev["Kaolack"] * stddev["Kaolack"] / pop_size["Kaolack"] assert ( rates["Ziguinchor"] == 5 * stddev["Ziguinchor"] * stddev["Ziguinchor"] / pop_size["Ziguinchor"] ) assert rates["Dakar"] == sizes["Dakar"] / pop_size["Dakar"] assert rates["Kaolack"] == sizes["Kaolack"] / pop_size["Kaolack"] assert rates["Ziguinchor"] == sizes["Ziguinchor"] / pop_size["Ziguinchor"] def testation_equal_errors_error1(): stddev = {"Dakar": 5, "Kaolack": 10, "Ziguinchor": 20} with pytest.raises(ValueError): 
allocate( method="equal_errors", stratum=region, pop_size=pop_size, constant=[9], stddev=stddev, ) def testation_optimum_comparison_error2(): with pytest.raises(ValueError): allocate(method="optimum_comparison", stratum=region, pop_size=pop_size, stddev=[5]) ## Design effects deff_calculation = SampleSize() def test_deff_int(): assert deff_calculation.deff(30, 0.01) == 1.29 def test_deff_float(): assert deff_calculation.deff(15.5, 0.03) == 1.435 def test_deff_dict(): m = {"stratum1": 30, "stratum2": 15.5, "stratum3": 50} icc = {"stratum1": 0.01, "stratum2": 0.03, "stratum3": 0.10} deff = deff_calculation.deff(m, icc) assert deff == {"stratum1": 1.29, "stratum2": 1.435, "stratum3": 5.9} ## Wald's method size_nat_wald = SampleSize() def test_size_nat_wald_basics(): assert size_nat_wald.parameter == "proportion" assert size_nat_wald.method == "wald" assert size_nat_wald.stratification == False def test_size_nat_wald_size(): size_nat_wald.calculate(0.80, 0.10) assert size_nat_wald.samp_size == 62 assert size_nat_wald.deff_c == 1.0 assert size_nat_wald.target == 0.80 assert size_nat_wald.half_ci == 0.1 def test_size_nat_wald_size_with_deff(): size_nat_wald.calculate(0.80, 0.10, deff=1.5) assert size_nat_wald.samp_size == 93 assert size_nat_wald.deff_c == 1.5 assert size_nat_wald.target == 0.80 assert size_nat_wald.half_ci == 0.1 def test_size_nat_wald_df(): size_nat_wald.calculate(0.80, 0.10) size_df = size_nat_wald.to_dataframe() assert (size_df.columns == ["_target", "_half_ci", "_samp_size"]).all() ## Wald's method - stratified size_str_wald = SampleSize(parameter="Proportion", method="Wald", stratification=True) target = {"stratum1": 0.95, "stratum2": 0.70, "stratum3": 0.30} half_ci = {"stratum1": 0.30, "stratum2": 0.10, "stratum3": 0.15} deff = {"stratum1": 1, "stratum2": 1.5, "stratum3": 2.5} def test_size_str_wald_basics(): assert size_str_wald.parameter == "proportion" assert size_str_wald.method == "wald" assert size_str_wald.stratification == True def test_size_str_wald_size1(): size_str_wald.calculate(target, 0.10) assert size_str_wald.samp_size["stratum1"] == 19 assert size_str_wald.samp_size["stratum2"] == 81 assert size_str_wald.samp_size["stratum3"] == 81 def test_size_str_wald_size2(): size_str_wald.calculate(0.8, half_ci) assert size_str_wald.samp_size["stratum1"] == 7 assert size_str_wald.samp_size["stratum2"] == 62 assert size_str_wald.samp_size["stratum3"] == 28 def test_size_str_wald_size3(): size_str_wald.calculate(0.8, 0.10, deff) assert size_str_wald.samp_size["stratum1"] == 62 assert size_str_wald.samp_size["stratum2"] == 93 assert size_str_wald.samp_size["stratum3"] == 154 def test_size_str_wald_size4(): size_str_wald.calculate(target, half_ci, deff) assert size_str_wald.samp_size["stratum1"] == 3 assert size_str_wald.samp_size["stratum2"] == 122 assert size_str_wald.samp_size["stratum3"] == 90 def test_size_str_wald_size5(): size_str_wald.calculate(0.8, 0.1, 1.5, number_strata=5) assert size_str_wald.samp_size["_stratum_1"] == 93 assert size_str_wald.samp_size["_stratum_2"] == 93 assert size_str_wald.samp_size["_stratum_3"] == 93 assert size_str_wald.samp_size["_stratum_4"] == 93 assert size_str_wald.samp_size["_stratum_5"] == 93 def test_size_str_wald_df(): size_str_wald.calculate(0.80, 0.10, number_strata=5) size_df = size_str_wald.to_dataframe() assert size_df.shape[0] == 5 assert (size_df.columns == ["_stratum", "_target", "_half_ci", "_samp_size"]).all() ## Fleiss' method size_nat_fleiss = SampleSize(method="fleiss") def test_size_nat_fleiss_basics(): assert 
size_nat_fleiss.parameter == "proportion" assert size_nat_fleiss.method == "fleiss" assert size_nat_fleiss.stratification == False def test_size_nat_fleiss_size1a(): size_nat_fleiss.calculate(0.80, 0.10) assert size_nat_fleiss.samp_size == 88 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.80 assert size_nat_fleiss.half_ci == 0.1 def test_size_nat_fleiss_size1b(): size_nat_fleiss.calculate(0.20, 0.10) assert size_nat_fleiss.samp_size == 88 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.20 assert size_nat_fleiss.half_ci == 0.1 def test_size_nat_fleiss_size2a(): size_nat_fleiss.calculate(0.95, 0.06) assert size_nat_fleiss.samp_size == 132 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.95 assert size_nat_fleiss.half_ci == 0.06 def test_size_nat_fleiss_size2b(): size_nat_fleiss.calculate(0.05, 0.06) assert size_nat_fleiss.samp_size == 132 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.05 assert size_nat_fleiss.half_ci == 0.06 def test_size_nat_fleiss_size3(): size_nat_fleiss.calculate(0.70, 0.03) assert size_nat_fleiss.samp_size == 1097 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.70 assert size_nat_fleiss.half_ci == 0.03 def test_size_nat_fleiss_size4(): size_nat_fleiss.calculate(0.85, 0.03) assert size_nat_fleiss.samp_size == 663 assert size_nat_fleiss.deff_c == 1.0 assert size_nat_fleiss.target == 0.85 assert size_nat_fleiss.half_ci == 0.03 def test_size_nat_fleiss_size_with_deff1a(): size_nat_fleiss.calculate(0.80, 0.10, deff=1.5) assert size_nat_fleiss.samp_size == 132 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.80 assert size_nat_fleiss.half_ci == 0.1 def test_size_nat_fleiss_size_with_deff1b(): size_nat_fleiss.calculate(0.20, 0.10, deff=1.5) assert size_nat_fleiss.samp_size == 132 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.20 assert size_nat_fleiss.half_ci == 0.1 def test_size_nat_fleiss_size_with_deff2a(): size_nat_fleiss.calculate(0.95, 0.06, deff=1.5) assert size_nat_fleiss.samp_size == 197 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.95 assert size_nat_fleiss.half_ci == 0.06 def test_size_nat_fleiss_size_with_deff2b(): size_nat_fleiss.calculate(0.05, 0.06, deff=1.5) assert size_nat_fleiss.samp_size == 197 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.05 assert size_nat_fleiss.half_ci == 0.06 def test_size_nat_fleiss_size_with_deff3(): size_nat_fleiss.calculate(0.70, 0.03, deff=1.5) assert size_nat_fleiss.samp_size == 1646 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.70 assert size_nat_fleiss.half_ci == 0.03 def test_size_nat_fleiss_size_with_deff4(): size_nat_fleiss.calculate(0.85, 0.03, deff=1.5) assert size_nat_fleiss.samp_size == 994 assert size_nat_fleiss.deff_c == 1.5 assert size_nat_fleiss.target == 0.85 assert size_nat_fleiss.half_ci == 0.03 def test_size_nat_fleiss_df(): size_nat_fleiss.calculate(0.80, 0.10) size_df = size_nat_fleiss.to_dataframe() assert (size_df.columns == ["_target", "_half_ci", "_samp_size"]).all() ## Fleiss' method - stratified size_str_fleiss = SampleSize(parameter="Proportion", method="Fleiss", stratification=True) target2 = {"stratum1": 0.95, "stratum2": 0.70, "stratum3": 0.30} half_ci2 = {"stratum1": 0.03, "stratum2": 0.10, "stratum3": 0.05} deff2 = {"stratum1": 1, "stratum2": 1.5, "stratum3": 2.5} def test_size_str_fleiss_basics(): assert size_str_fleiss.parameter == "proportion" assert 
size_str_fleiss.method == "fleiss" assert size_str_fleiss.stratification == True def test_size_str_fleiss_size1(): size_str_fleiss.calculate(target2, 0.10) assert size_str_fleiss.samp_size["stratum1"] == 70 assert size_str_fleiss.samp_size["stratum2"] == 103 assert size_str_fleiss.samp_size["stratum3"] == 103 def test_size_str_fleiss_size2(): size_str_fleiss.calculate(0.8, half_ci2) assert size_str_fleiss.samp_size["stratum1"] == 788 assert size_str_fleiss.samp_size["stratum2"] == 88 assert size_str_fleiss.samp_size["stratum3"] == 306 def test_size_str_fleiss_size3(): size_str_fleiss.calculate(0.8, 0.10, deff2) assert size_str_fleiss.samp_size["stratum1"] == 88 assert size_str_fleiss.samp_size["stratum2"] == 132 assert size_str_fleiss.samp_size["stratum3"] == 220 def test_size_str_fleiss_size4(): size_str_fleiss.calculate(target2, half_ci2, deff2) assert size_str_fleiss.samp_size["stratum1"] == 354 assert size_str_fleiss.samp_size["stratum2"] == 154 assert size_str_fleiss.samp_size["stratum3"] == 1002 def test_size_str_fleiss_size5(): size_str_fleiss.calculate(0.8, 0.1, 1.5, number_strata=5) assert size_str_fleiss.samp_size["_stratum_1"] == 132 assert size_str_fleiss.samp_size["_stratum_2"] == 132 assert size_str_fleiss.samp_size["_stratum_3"] == 132 assert size_str_fleiss.samp_size["_stratum_4"] == 132 assert size_str_fleiss.samp_size["_stratum_5"] == 132 def test_size_str_fleiss_df(): size_str_fleiss.calculate(0.80, 0.10, number_strata=5) size_df = size_str_fleiss.to_dataframe() assert size_df.shape[0] == 5 assert (size_df.columns == ["_stratum", "_target", "_half_ci", "_samp_size"]).all()
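# Worked example (illustrative, not from samplics): the proportional allocation
# asserted in the allocation tests above follows n_h = n * N_h / N. The helper
# below is hypothetical and simply restates that arithmetic.
def _manual_proportional_allocation(samp_size, pop_size):
    total = sum(pop_size.values())
    return {stratum: round(samp_size * size / total) for stratum, size in pop_size.items()}

# With samp_size=100 and pop_size={"Dakar": 500, "Kaolack": 300, "Ziguinchor": 200}
# this returns {"Dakar": 50, "Kaolack": 30, "Ziguinchor": 20}, matching the test.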
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for CRF.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import itertools import numpy as np from tensorflow.contrib.crf.python.ops import crf from tensorflow.python.framework import constant_op from tensorflow.python.ops import array_ops from tensorflow.python.ops import math_ops from tensorflow.python.platform import test class CrfTest(test.TestCase): def calculateSequenceScore(self, inputs, transition_params, tag_indices, sequence_lengths): expected_unary_score = sum( inputs[i][tag_indices[i]] for i in range(sequence_lengths)) expected_binary_score = sum( transition_params[tag_indices[i], tag_indices[i + 1]] for i in range(sequence_lengths - 1)) return expected_unary_score + expected_binary_score def testCrfSequenceScore(self): transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) # Test both the length-1 and regular cases. sequence_lengths_list = [ np.array(3, dtype=np.int32), np.array(1, dtype=np.int32) ] inputs_list = [ np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32), np.array([[4, 5, -3]], dtype=np.float32), ] tag_indices_list = [ np.array([1, 2, 1, 0], dtype=np.int32), np.array([1], dtype=np.int32) ] for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list, inputs_list, tag_indices_list): with self.cached_session() as sess: sequence_score = crf.crf_sequence_score( inputs=array_ops.expand_dims(inputs, 0), tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) sequence_score = array_ops.squeeze(sequence_score, [0]) tf_sequence_score = sess.run(sequence_score) expected_sequence_score = self.calculateSequenceScore( inputs, transition_params, tag_indices, sequence_lengths) self.assertAllClose(tf_sequence_score, expected_sequence_score) def testCrfMultiTagSequenceScore(self): transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) # Test both the length-1 and regular cases. 
sequence_lengths_list = [ np.array(3, dtype=np.int32), np.array(1, dtype=np.int32) ] inputs_list = [ np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32), np.array([[4, 5, -3]], dtype=np.float32), ] tag_bitmap_list = [ np.array( [[True, True, False], [True, False, True], [False, True, True], [True, False, True]], dtype=np.bool), np.array([[True, True, False]], dtype=np.bool) ] for sequence_lengths, inputs, tag_bitmap in zip( sequence_lengths_list, inputs_list, tag_bitmap_list): with self.cached_session() as sess: sequence_score = crf.crf_multitag_sequence_score( inputs=array_ops.expand_dims(inputs, 0), tag_bitmap=array_ops.expand_dims(tag_bitmap, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) sequence_score = array_ops.squeeze(sequence_score, [0]) tf_sum_sequence_score = sess.run(sequence_score) all_indices_list = [ single_index_bitmap.nonzero()[0] for single_index_bitmap in tag_bitmap[:sequence_lengths] ] expected_sequence_scores = [ self.calculateSequenceScore(inputs, transition_params, indices, sequence_lengths) for indices in itertools.product(*all_indices_list) ] expected_log_sum_exp_sequence_scores = np.logaddexp.reduce( expected_sequence_scores) self.assertAllClose(tf_sum_sequence_score, expected_log_sum_exp_sequence_scores) def testCrfUnaryScore(self): inputs = np.array( [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32) for dtype in (np.int32, np.int64): tag_indices = np.array([1, 2, 1, 0], dtype=dtype) sequence_lengths = np.array(3, dtype=np.int32) with self.cached_session() as sess: unary_score = crf.crf_unary_score( tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), inputs=array_ops.expand_dims(inputs, 0)) unary_score = array_ops.squeeze(unary_score, [0]) tf_unary_score = sess.run(unary_score) expected_unary_score = sum(inputs[i][tag_indices[i]] for i in range(sequence_lengths)) self.assertAllClose(tf_unary_score, expected_unary_score) def testCrfBinaryScore(self): tag_indices = np.array([1, 2, 1, 0], dtype=np.int32) transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) sequence_lengths = np.array(3, dtype=np.int32) with self.cached_session() as sess: binary_score = crf.crf_binary_score( tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) binary_score = array_ops.squeeze(binary_score, [0]) tf_binary_score = sess.run(binary_score) expected_binary_score = sum( transition_params[tag_indices[i], tag_indices[i + 1]] for i in range(sequence_lengths - 1)) self.assertAllClose(tf_binary_score, expected_binary_score) def testCrfLogNorm(self): transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) # Test both the length-1 and regular cases. sequence_lengths_list = [ np.array(3, dtype=np.int32), np.array(1, dtype=np.int64) ] inputs_list = [ np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32), np.array([[3, -1, 3]], dtype=np.float32), ] tag_indices_list = [ np.array([1, 2, 1, 0], dtype=np.int32), np.array([2], dtype=np.int32) ] for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list, inputs_list, tag_indices_list): num_words = inputs.shape[0] num_tags = inputs.shape[1] with self.cached_session() as sess: all_sequence_scores = [] # Compare the dynamic program with brute force computation. 
for tag_indices in itertools.product( range(num_tags), repeat=sequence_lengths): tag_indices = list(tag_indices) tag_indices.extend([0] * (num_words - sequence_lengths)) all_sequence_scores.append( crf.crf_sequence_score( inputs=array_ops.expand_dims(inputs, 0), tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params))) brute_force_log_norm = math_ops.reduce_logsumexp(all_sequence_scores) log_norm = crf.crf_log_norm( inputs=array_ops.expand_dims(inputs, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) log_norm = array_ops.squeeze(log_norm, [0]) tf_brute_force_log_norm, tf_log_norm = sess.run( [brute_force_log_norm, log_norm]) self.assertAllClose(tf_log_norm, tf_brute_force_log_norm) def testCrfLogNormZeroSeqLength(self): """ Test `crf_log_norm` when `sequence_lengths` contains one or more zeros. """ with self.cached_session() as sess: inputs = constant_op.constant(np.ones([2, 10, 5], dtype=np.float32)) transition_params = constant_op.constant(np.ones([5, 5], dtype=np.float32)) sequence_lengths = constant_op.constant(np.zeros([2], dtype=np.int32)) expected_log_norm = np.zeros([2], dtype=np.float32) log_norm = crf.crf_log_norm(inputs, sequence_lengths, transition_params) tf_log_norm = sess.run(log_norm) self.assertAllClose(tf_log_norm, expected_log_norm) def testCrfLogLikelihood(self): inputs = np.array( [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32) transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) sequence_lengths = np.array(3, dtype=np.int32) num_words = inputs.shape[0] num_tags = inputs.shape[1] with self.cached_session() as sess: all_sequence_log_likelihoods = [] # Make sure all probabilities sum to 1. for tag_indices in itertools.product( range(num_tags), repeat=sequence_lengths): tag_indices = list(tag_indices) tag_indices.extend([0] * (num_words - sequence_lengths)) sequence_log_likelihood, _ = crf.crf_log_likelihood( inputs=array_ops.expand_dims(inputs, 0), tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) all_sequence_log_likelihoods.append(sequence_log_likelihood) total_log_likelihood = math_ops.reduce_logsumexp( all_sequence_log_likelihoods) tf_total_log_likelihood = sess.run(total_log_likelihood) self.assertAllClose(tf_total_log_likelihood, 0.0) def testViterbiDecode(self): inputs = np.array( [[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32) transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) sequence_lengths = np.array(3, dtype=np.int32) num_words = inputs.shape[0] num_tags = inputs.shape[1] with self.cached_session() as sess: all_sequence_scores = [] all_sequences = [] # Compare the dynamic program with brute force computation. 
for tag_indices in itertools.product( range(num_tags), repeat=sequence_lengths): tag_indices = list(tag_indices) tag_indices.extend([0] * (num_words - sequence_lengths)) all_sequences.append(tag_indices) sequence_score = crf.crf_sequence_score( inputs=array_ops.expand_dims(inputs, 0), tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) sequence_score = array_ops.squeeze(sequence_score, [0]) all_sequence_scores.append(sequence_score) tf_all_sequence_scores = sess.run(all_sequence_scores) expected_max_sequence_index = np.argmax(tf_all_sequence_scores) expected_max_sequence = all_sequences[expected_max_sequence_index] expected_max_score = tf_all_sequence_scores[expected_max_sequence_index] actual_max_sequence, actual_max_score = crf.viterbi_decode( inputs[:sequence_lengths], transition_params) self.assertAllClose(actual_max_score, expected_max_score) self.assertEqual(actual_max_sequence, expected_max_sequence[:sequence_lengths]) def testCrfDecode(self): transition_params = np.array( [[-3, 5, -2], [3, 4, 1], [1, 2, 1]], dtype=np.float32) # Test both the length-1 and regular cases. sequence_lengths_list = [ np.array(3, dtype=np.int32), np.array(1, dtype=np.int64) ] inputs_list = [ np.array([[4, 5, -3], [3, -1, 3], [-1, 2, 1], [0, 0, 0]], dtype=np.float32), np.array([[-1, 2, 1]], dtype=np.float32), ] tag_indices_list = [ np.array([1, 2, 1, 0], dtype=np.int32), np.array([2], dtype=np.int32) ] for sequence_lengths, inputs, tag_indices in zip(sequence_lengths_list, inputs_list, tag_indices_list): num_words = inputs.shape[0] num_tags = inputs.shape[1] with self.cached_session() as sess: all_sequence_scores = [] all_sequences = [] # Compare the dynamic program with brute force computation. for tag_indices in itertools.product( range(num_tags), repeat=sequence_lengths): tag_indices = list(tag_indices) tag_indices.extend([0] * (num_words - sequence_lengths)) all_sequences.append(tag_indices) sequence_score = crf.crf_sequence_score( inputs=array_ops.expand_dims(inputs, 0), tag_indices=array_ops.expand_dims(tag_indices, 0), sequence_lengths=array_ops.expand_dims(sequence_lengths, 0), transition_params=constant_op.constant(transition_params)) sequence_score = array_ops.squeeze(sequence_score, [0]) all_sequence_scores.append(sequence_score) tf_all_sequence_scores = sess.run(all_sequence_scores) expected_max_sequence_index = np.argmax(tf_all_sequence_scores) expected_max_sequence = all_sequences[expected_max_sequence_index] expected_max_score = tf_all_sequence_scores[expected_max_sequence_index] actual_max_sequence, actual_max_score = crf.crf_decode( array_ops.expand_dims(inputs, 0), constant_op.constant(transition_params), array_ops.expand_dims(sequence_lengths, 0)) actual_max_sequence = array_ops.squeeze(actual_max_sequence, [0]) actual_max_score = array_ops.squeeze(actual_max_score, [0]) tf_actual_max_sequence, tf_actual_max_score = sess.run( [actual_max_sequence, actual_max_score]) self.assertAllClose(tf_actual_max_score, expected_max_score) self.assertEqual(list(tf_actual_max_sequence[:sequence_lengths]), expected_max_sequence[:sequence_lengths]) def testCrfDecodeZeroSeqLength(self): """ Test that crf_decode works when sequence_length contains one or more zeros. 
""" with self.cached_session() as sess: inputs = constant_op.constant(np.ones([2, 10, 5], dtype=np.float32)) transition_params = constant_op.constant(np.ones([5, 5], dtype=np.float32)) sequence_lengths = constant_op.constant(np.zeros([2], dtype=np.int32)) tags, scores = crf.crf_decode(inputs, transition_params, sequence_lengths) tf_tags, tf_scores = sess.run([tags, scores]) self.assertEqual(len(tf_tags.shape), 2) self.assertEqual(len(tf_scores.shape), 1) if __name__ == "__main__": test.main()
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. """ Utilities for running tests. """ import os import random import tempfile import time import unittest from typing import Any, Dict, List, Optional, Sequence, Union import numpy as np import torch from hydra.experimental import compose, initialize from mephisto.abstractions.blueprint import SharedTaskState from mephisto.abstractions.databases.local_database import LocalMephistoDB from mephisto.operations.operator import Operator from mephisto.tools.scripts import augment_config_from_db from pytest_regressions.data_regression import DataRegressionFixture class AbstractCrowdsourcingTest: """ Abstract class for end-to-end tests of Mephisto-based crowdsourcing tasks. Allows for setup and teardown of the operator, as well as for config specification and agent registration. """ def _setup(self): """ To be run before a test. Should be called in a pytest setup/teardown fixture. """ random.seed(0) np.random.seed(0) torch.manual_seed(0) self.operator = None self.server = None def _teardown(self): """ To be run after a test. Should be called in a pytest setup/teardown fixture. """ if self.operator is not None: self.operator.force_shutdown() if self.server is not None: self.server.shutdown_mock() def _set_up_config( self, task_directory: str, overrides: Optional[List[str]] = None, config_name: str = "example", ): """ Set up the config and database. Uses the Hydra compose() API for unit testing and a temporary directory to store the test database. :param blueprint_type: string uniquely specifying Blueprint class :param task_directory: directory containing the `conf/` configuration folder. Will be injected as `${task_dir}` in YAML files. :param overrides: additional config overrides """ # Define the configuration settings relative_task_directory = os.path.relpath( task_directory, os.path.dirname(__file__) ) relative_config_path = os.path.join( relative_task_directory, 'hydra_configs', 'conf' ) if overrides is None: overrides = [] with initialize(config_path=relative_config_path): self.config = compose( config_name=config_name, overrides=[ f'mephisto/architect=mock', f'mephisto/provider=mock', f'+task_dir={task_directory}', f'+current_time={int(time.time())}', ] + overrides, ) self.data_dir = tempfile.mkdtemp() self.database_path = os.path.join(self.data_dir, "mephisto.db") self.db = LocalMephistoDB(self.database_path) self.config = augment_config_from_db(self.config, self.db) self.config.mephisto.architect.should_run_server = True def _set_up_server(self, shared_state: Optional[SharedTaskState] = None): """ Set up the operator and server. """ self.operator = Operator(self.db) self.operator.validate_and_run_config( self.config.mephisto, shared_state=shared_state ) self.server = self._get_channel_info().job.architect.server def _get_channel_info(self): """ Return channel info for the currently running job. """ channels = list(self.operator.supervisor.channels.values()) if len(channels) > 0: return channels[0] else: raise ValueError('No channel could be detected!') def _register_mock_agents( self, num_agents: int = 1, assume_onboarding: bool = False ) -> List[str]: """ Register mock agents for testing and onboard them if needed, taking the place of crowdsourcing workers. Specify the number of agents to register. Return the agents' IDs after creation. 
""" for idx in range(num_agents): mock_worker_name = f"MOCK_WORKER_{idx:d}" max_num_tries = 6 initial_wait_time = 0.5 # In seconds num_tries = 0 wait_time = initial_wait_time while num_tries < max_num_tries: try: # Register the worker self.server.register_mock_worker(mock_worker_name) workers = self.db.find_workers(worker_name=mock_worker_name) worker_id = workers[0].db_id # Register the agent mock_agent_details = f"FAKE_ASSIGNMENT_{idx:d}" self.server.register_mock_agent(worker_id, mock_agent_details) if assume_onboarding: # Submit onboarding from the agent onboard_agents = self.db.find_onboarding_agents() onboard_data = {"onboarding_data": {"success": True}} self.server.register_mock_agent_after_onboarding( worker_id, onboard_agents[0].get_agent_id(), onboard_data ) _ = self.db.find_agents()[idx] # Make sure the agent can be found, or else raise an IndexError break except IndexError: num_tries += 1 print( f'The agent could not be registered after {num_tries:d} ' f'attempt(s), out of {max_num_tries:d} attempts total. Waiting ' f'for {wait_time:0.1f} seconds...' ) time.sleep(wait_time) wait_time *= 2 # Wait for longer next time else: raise ValueError('The worker could not be registered!') # Get all agents' IDs agents = self.db.find_agents() if len(agents) != num_agents: raise ValueError( f'The actual number of agents is {len(agents):d} instead of the ' f'desired {num_agents:d}!' ) agent_ids = [agent.db_id for agent in agents] return agent_ids class AbstractOneTurnCrowdsourcingTest(AbstractCrowdsourcingTest): """ Abstract class for end-to-end tests of one-turn crowdsourcing tasks. Useful for Blueprints such as AcuteEvalBlueprint and StaticReactBlueprint for which all of the worker's responses are sent to the backend code at once. """ def _test_agent_state( self, task_data: Dict[str, Any], data_regression: DataRegressionFixture ): """ Test that the actual agent state matches the expected state. Get the final agent state given the input task data and check that it is as expected. """ state = self._get_agent_state(task_data=task_data) self._check_agent_state(state=state, data_regression=data_regression) def _get_agent_state(self, task_data: Dict[str, Any]): """ Submit user task data and return the final agent state. Register a mock human agent, request initial data to define the 'inputs' field of the agent state, make the agent act to define the 'outputs' field of the agent state, and return the agent state. """ # Set up the mock human agent if self.config.mephisto.blueprint.get("onboarding_qualification", None): agent_id = self._register_mock_agents(num_agents=1, assume_onboarding=True)[ 0 ] else: agent_id = self._register_mock_agents(num_agents=1)[0] # Set initial data self.server.request_init_data(agent_id) # Make agent act self.server.send_agent_act( agent_id, {"MEPHISTO_is_submit": True, "task_data": task_data} ) return self.db.find_agents()[0].state.get_data() def _check_agent_state( self, state: Dict[str, Any], data_regression: DataRegressionFixture ): """ Given an agent state, test that it is as expected. """ del state['times'] # Delete variable timestamps data_regression.check(state) class AbstractParlAIChatTest(AbstractCrowdsourcingTest): """ Abstract class for end-to-end tests of one-turn ParlAIChatBlueprint tasks. 
""" def _test_agent_states( self, num_agents: int, agent_display_ids: Sequence[str], agent_messages: List[Sequence[str]], form_messages: Sequence[str], form_task_data: Sequence[Dict[str, Any]], expected_states: Sequence[Dict[str, Any]], agent_task_data: Optional[List[Sequence[Dict[str, Any]]]] = None, ): """ Test that the actual agent states match the expected states. Register mock human agents, request initial data to define the 'inputs' fields of the agent states, make the agents have a conversation to define the 'outputs' fields of the agent states, and then check that the agent states all match the desired agent states. """ # If no task data was supplied, create empty task data if agent_task_data is None: agent_task_data = [] for message_round in agent_messages: agent_task_data.append([{}] * len(message_round)) # Set up the mock human agents agent_ids = self._register_mock_agents(num_agents=num_agents) # # Feed messages to the agents # Set initial data for agent_id in agent_ids: self.server.request_init_data(agent_id) # Have agents talk to each other assert len(agent_messages) == len(agent_task_data) for message_round, task_data_round in zip(agent_messages, agent_task_data): assert len(message_round) == len(task_data_round) == len(agent_ids) for agent_id, agent_display_id, message, task_data in zip( agent_ids, agent_display_ids, message_round, task_data_round ): self._send_agent_message( agent_id=agent_id, agent_display_id=agent_display_id, text=message, task_data=task_data, ) # Have agents fill out the form for agent_idx, agent_id in enumerate(agent_ids): self.server.send_agent_act( agent_id=agent_id, act_content={ 'text': form_messages[agent_idx], 'task_data': form_task_data[agent_idx], 'id': agent_display_ids[agent_idx], 'episode_done': False, }, ) # Submit the HIT for agent_id in agent_ids: self.server.send_agent_act( agent_id=agent_id, act_content={ 'task_data': {'final_data': {}}, 'MEPHISTO_is_submit': True, }, ) # # Check that the inputs and outputs are as expected # Wait until all messages have arrived wait_time = 5.0 # In seconds max_num_tries = 30 # max_num_tries * wait_time is the max time to wait num_tries = 0 while num_tries < max_num_tries: actual_states = [agent.state.get_data() for agent in self.db.find_agents()] assert len(actual_states) == len(expected_states) expected_num_messages = sum( len(state['outputs']['messages']) for state in expected_states ) actual_num_messages = sum( len(state['outputs']['messages']) for state in actual_states ) if expected_num_messages == actual_num_messages: break else: num_tries += 1 print( f'The expected number of messages is ' f'{expected_num_messages:d}, but the actual number of messages ' f'is {actual_num_messages:d}! Waiting for {wait_time:0.1f} seconds ' f'for more messages to arrive (try #{num_tries:d} of ' f'{max_num_tries:d})...' ) time.sleep(wait_time) else: actual_num_messages = sum( len(state['outputs']['messages']) for state in actual_states ) print(f'\nPrinting all {actual_num_messages:d} messages received:') for state in actual_states: for message in state['outputs']['messages']: print(message) raise ValueError( f'The expected number of messages ({expected_num_messages:d}) never ' f'arrived!' 
) # Check the contents of each message for actual_state, expected_state in zip(actual_states, expected_states): clean_actual_state = self._remove_non_deterministic_keys(actual_state) assert clean_actual_state['inputs'] == expected_state['inputs'] for actual_message, expected_message in zip( clean_actual_state['outputs']['messages'], expected_state['outputs']['messages'], ): for key, expected_value in expected_message.items(): self._check_output_key( key=key, actual_value=actual_message[key], expected_value=expected_value, ) def _remove_non_deterministic_keys(self, actual_state: dict) -> dict: """ Allow for subclasses to delete certain keys in the actual state that will change on each run. """ return actual_state def _check_output_key( self: Union['AbstractParlAIChatTest', unittest.TestCase], key: str, actual_value: Any, expected_value: Any, ): # TODO: remove typing of self after switching to pytest regressions, in which we # no longer inherit from TestCase """ Check the actual and expected values, given that they come from the specified key of the output message dictionary. This function can be extended to handle special cases for subclassed Mephisto tasks. """ if key == 'timestamp': pass # The timestamp will obviously be different elif key == 'data': for key_inner, expected_value_inner in expected_value.items(): if key_inner in ['beam_texts', 'message_id']: pass # The message ID will be different else: if actual_value[key_inner] != expected_value_inner: raise ValueError( f'The value of ["{key}"]["{key_inner}"] is supposed to be ' f'{expected_value_inner} but is actually ' f'{actual_value[key_inner]}!' ) else: if actual_value != expected_value: raise ValueError( f'The value of ["{key}"] is supposed to be {expected_value} but is ' f'actually {actual_value}!' ) def _send_agent_message( self, agent_id: str, agent_display_id: str, text: str, task_data: Dict[str, Any] ): """ Have the agent specified by agent_id send the specified text and task data with the given display ID string. """ act_content = { "text": text, "task_data": task_data, "id": agent_display_id, "episode_done": False, } self.server.send_agent_act(agent_id=agent_id, act_content=act_content) def check_stdout(actual_stdout: str, expected_stdout_path: str): """ Check that actual and expected stdouts match. Given a string of the actual stdout and a path to the expected stdout, check that both stdouts match, keeping in mind that the actual stdout may have additional strings relating to progress updates that are not found in the expected output strings. TODO: this can probably be moved to a method of an abstract test class once all analysis code relies on pytest regressions for some of its tests. """ actual_stdout_lines = actual_stdout.split('\n') with open(expected_stdout_path) as f: expected_stdout = f.read() for expected_line in expected_stdout.split('\n'): if not any(expected_line in actual_line for actual_line in actual_stdout_lines): raise ValueError( f'\n\tThe following line:\n\n{expected_line}\n\n\twas not found ' f'in the actual stdout:\n\n{actual_stdout}' )
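# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of this module): a concrete test could wire the
# helpers above together roughly as follows. TASK_DIRECTORY and the task_data
# payload are hypothetical placeholders for a real one-turn task; only
# _setup/_teardown/_set_up_config/_set_up_server/_test_agent_state come from
# the classes defined above. Rename the class to Test* inside a real task
# directory so that pytest collects it.

import pytest  # only needed for the sketch below

TASK_DIRECTORY = os.path.dirname(os.path.abspath(__file__))  # hypothetical


class _ExampleOneTurnTaskSketch(AbstractOneTurnCrowdsourcingTest):
    """End-to-end check of a hypothetical one-turn task."""

    @pytest.fixture(scope="function", autouse=True)
    def _setup_teardown(self):
        self._setup()  # seed RNGs and clear operator/server handles
        yield
        self._teardown()  # shut down the operator and mock server

    def test_submission(self, data_regression: DataRegressionFixture):
        # Build the Hydra config, temporary database, operator, and mock server
        self._set_up_config(task_directory=TASK_DIRECTORY)
        self._set_up_server()
        # Register one mock agent, submit the payload, and compare the final
        # agent state against the stored pytest-regressions file
        self._test_agent_state(
            task_data={'rating': 5}, data_regression=data_regression
        )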
"""Nearest Neighbor Classification""" # Authors: Jake Vanderplas <vanderplas@astro.washington.edu> # Fabian Pedregosa <fabian.pedregosa@inria.fr> # Alexandre Gramfort <alexandre.gramfort@inria.fr> # Sparseness support by Lars Buitinck # Multi-output support by Arnaud Joly <a.joly@ulg.ac.be> # # License: BSD 3 clause (C) INRIA, University of Amsterdam import numpy as np from scipy import stats from ..utils.extmath import weighted_mode from .base import \ _check_weights, _get_weights, \ NeighborsBase, KNeighborsMixin,\ RadiusNeighborsMixin, SupervisedIntegerMixin from ..base import ClassifierMixin from ..utils import check_array class KNeighborsClassifier(NeighborsBase, KNeighborsMixin, SupervisedIntegerMixin, ClassifierMixin): """Classifier implementing the k-nearest neighbors vote. Read more in the :ref:`User Guide <classification>`. Parameters ---------- n_neighbors : int, optional (default = 5) Number of neighbors to use by default for :meth:`kneighbors` queries. weights : str or callable, optional (default = 'uniform') weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDTree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or callable, default 'minkowski' the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. metric_params : dict, optional (default = None) Additional keyword arguments for the metric function. n_jobs : int, optional (default = 1) The number of parallel jobs to run for neighbors search. If ``-1``, then the number of jobs is set to the number of CPU cores. Doesn't affect :meth:`fit` method. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import KNeighborsClassifier >>> neigh = KNeighborsClassifier(n_neighbors=3) >>> neigh.fit(X, y) # doctest: +ELLIPSIS KNeighborsClassifier(...) 
>>> print(neigh.predict([[1.1]])) [0] >>> print(neigh.predict_proba([[0.9]])) [[ 0.66666667 0.33333333]] See also -------- RadiusNeighborsClassifier KNeighborsRegressor RadiusNeighborsRegressor NearestNeighbors Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. .. warning:: Regarding the Nearest Neighbors algorithms, if it is found that two neighbors, neighbor `k+1` and `k`, have identical distances but different labels, the results will depend on the ordering of the training data. https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, n_neighbors=5, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', metric_params=None, n_jobs=1, **kwargs): self._init_params(n_neighbors=n_neighbors, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, n_jobs=n_jobs, **kwargs) self.weights = _check_weights(weights) def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features), \ or (n_query, n_indexed) if metric == 'precomputed' Test samples. Returns ------- y : array of shape [n_samples] or [n_samples, n_outputs] Class labels for each data sample. """ X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) n_samples = X.shape[0] weights = _get_weights(neigh_dist, self.weights) y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): if weights is None: mode, _ = stats.mode(_y[neigh_ind, k], axis=1) else: mode, _ = weighted_mode(_y[neigh_ind, k], weights, axis=1) mode = np.asarray(mode.ravel(), dtype=np.intp) y_pred[:, k] = classes_k.take(mode) if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred def predict_proba(self, X): """Return probability estimates for the test data X. Parameters ---------- X : array-like, shape (n_query, n_features), \ or (n_query, n_indexed) if metric == 'precomputed' Test samples. Returns ------- p : array of shape = [n_samples, n_classes], or a list of n_outputs of such arrays if n_outputs > 1. The class probabilities of the input samples. Classes are ordered by lexicographic order. 
""" X = check_array(X, accept_sparse='csr') neigh_dist, neigh_ind = self.kneighbors(X) classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_samples = X.shape[0] weights = _get_weights(neigh_dist, self.weights) if weights is None: weights = np.ones_like(neigh_ind) all_rows = np.arange(X.shape[0]) probabilities = [] for k, classes_k in enumerate(classes_): pred_labels = _y[:, k][neigh_ind] proba_k = np.zeros((n_samples, classes_k.size)) # a simple ':' index doesn't work right for i, idx in enumerate(pred_labels.T): # loop is O(n_neighbors) proba_k[all_rows, idx] += weights[:, i] # normalize 'votes' into real [0,1] probabilities normalizer = proba_k.sum(axis=1)[:, np.newaxis] normalizer[normalizer == 0.0] = 1.0 proba_k /= normalizer probabilities.append(proba_k) if not self.outputs_2d_: probabilities = probabilities[0] return probabilities class RadiusNeighborsClassifier(NeighborsBase, RadiusNeighborsMixin, SupervisedIntegerMixin, ClassifierMixin): """Classifier implementing a vote among neighbors within a given radius Read more in the :ref:`User Guide <classification>`. Parameters ---------- radius : float, optional (default = 1.0) Range of parameter space to use by default for :meth`radius_neighbors` queries. weights : str or callable weight function used in prediction. Possible values: - 'uniform' : uniform weights. All points in each neighborhood are weighted equally. - 'distance' : weight points by the inverse of their distance. in this case, closer neighbors of a query point will have a greater influence than neighbors which are further away. - [callable] : a user-defined function which accepts an array of distances, and returns an array of the same shape containing the weights. Uniform weights are used by default. algorithm : {'auto', 'ball_tree', 'kd_tree', 'brute'}, optional Algorithm used to compute the nearest neighbors: - 'ball_tree' will use :class:`BallTree` - 'kd_tree' will use :class:`KDtree` - 'brute' will use a brute-force search. - 'auto' will attempt to decide the most appropriate algorithm based on the values passed to :meth:`fit` method. Note: fitting on sparse input will override the setting of this parameter, using brute force. leaf_size : int, optional (default = 30) Leaf size passed to BallTree or KDTree. This can affect the speed of the construction and query, as well as the memory required to store the tree. The optimal value depends on the nature of the problem. metric : string or callable, default 'minkowski' the distance metric to use for the tree. The default metric is minkowski, and with p=2 is equivalent to the standard Euclidean metric. See the documentation of the DistanceMetric class for a list of available metrics. p : integer, optional (default = 2) Power parameter for the Minkowski metric. When p = 1, this is equivalent to using manhattan_distance (l1), and euclidean_distance (l2) for p = 2. For arbitrary p, minkowski_distance (l_p) is used. outlier_label : int, optional (default = None) Label, which is given for outlier samples (samples with no neighbors on given radius). If set to None, ValueError is raised, when outlier is detected. metric_params : dict, optional (default = None) Additional keyword arguments for the metric function. Examples -------- >>> X = [[0], [1], [2], [3]] >>> y = [0, 0, 1, 1] >>> from sklearn.neighbors import RadiusNeighborsClassifier >>> neigh = RadiusNeighborsClassifier(radius=1.0) >>> neigh.fit(X, y) # doctest: +ELLIPSIS RadiusNeighborsClassifier(...) 
>>> print(neigh.predict([[1.5]])) [0] See also -------- KNeighborsClassifier RadiusNeighborsRegressor KNeighborsRegressor NearestNeighbors Notes ----- See :ref:`Nearest Neighbors <neighbors>` in the online documentation for a discussion of the choice of ``algorithm`` and ``leaf_size``. https://en.wikipedia.org/wiki/K-nearest_neighbor_algorithm """ def __init__(self, radius=1.0, weights='uniform', algorithm='auto', leaf_size=30, p=2, metric='minkowski', outlier_label=None, metric_params=None, **kwargs): self._init_params(radius=radius, algorithm=algorithm, leaf_size=leaf_size, metric=metric, p=p, metric_params=metric_params, **kwargs) self.weights = _check_weights(weights) self.outlier_label = outlier_label def predict(self, X): """Predict the class labels for the provided data Parameters ---------- X : array-like, shape (n_query, n_features), \ or (n_query, n_indexed) if metric == 'precomputed' Test samples. Returns ------- y : array of shape [n_samples] or [n_samples, n_outputs] Class labels for each data sample. """ X = check_array(X, accept_sparse='csr') n_samples = X.shape[0] neigh_dist, neigh_ind = self.radius_neighbors(X) inliers = [i for i, nind in enumerate(neigh_ind) if len(nind) != 0] outliers = [i for i, nind in enumerate(neigh_ind) if len(nind) == 0] classes_ = self.classes_ _y = self._y if not self.outputs_2d_: _y = self._y.reshape((-1, 1)) classes_ = [self.classes_] n_outputs = len(classes_) if self.outlier_label is not None: neigh_dist[outliers] = 1e-6 elif outliers: raise ValueError('No neighbors found for test samples %r, ' 'you can try using larger radius, ' 'give a label for outliers, ' 'or consider removing them from your dataset.' % outliers) weights = _get_weights(neigh_dist, self.weights) y_pred = np.empty((n_samples, n_outputs), dtype=classes_[0].dtype) for k, classes_k in enumerate(classes_): pred_labels = np.zeros(len(neigh_ind), dtype=object) pred_labels[:] = [_y[ind, k] for ind in neigh_ind] if weights is None: mode = np.array([stats.mode(pl)[0] for pl in pred_labels[inliers]], dtype=np.int) else: mode = np.array([weighted_mode(pl, w)[0] for (pl, w) in zip(pred_labels[inliers], weights[inliers])], dtype=np.int) mode = mode.ravel() y_pred[inliers, k] = classes_k.take(mode) if outliers: y_pred[outliers, :] = self.outlier_label if not self.outputs_2d_: y_pred = y_pred.ravel() return y_pred
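# ---------------------------------------------------------------------------
# Usage sketch (not part of this module): as documented above, predict()
# raises ValueError for query points that have no training neighbors within
# ``radius`` unless ``outlier_label`` supplies a fallback label. The toy data
# below is illustrative only.

if __name__ == '__main__':
    from sklearn.neighbors import RadiusNeighborsClassifier

    X = [[0.0], [0.5], [3.0], [3.5]]
    y = [0, 0, 1, 1]
    clf = RadiusNeighborsClassifier(radius=1.0, outlier_label=-1).fit(X, y)
    # 10.0 has no neighbors within radius 1.0, so it receives the outlier label
    print(clf.predict([[0.2], [3.2], [10.0]]))  # expected: [ 0  1 -1]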
# Copyright 2015, 2016 IBM Corp. # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from src.main.HmcRestClient import * from src.logical_partition import ListLogicalPartition, \ DeleteLogicalPartition from src.logical_partition.sriov_logical_port import ListSRIOVLogicalPort, \ CreateSRIOVLogicalPort, \ ClearSRIOVLogicalPortStatistics, \ ModifySRIOVEthernetLogicalPort from src.logical_partition.virtual_fibrechannel_client_adapter import ListVirtualFibreChannelClientAdapter, \ CreateVirtualFibreChannelClientAdapter from src.logical_partition.vscsi_client_adapter import ListVirtualSCSIClientAdapter, \ CreateVirtualSCSIClientAdapter from src.logical_partition.client_network_adapter import CreateClientNetworkAdapter, \ ListClientNetworkAdapter from src.partition_operation_util import PowerOnPartition,\ PowerOffPartition,\ ModifyPartition, \ CreatePartition from src.logical_partition_profile import ListLogicalPartitionProfile,\ CreateLogicalPartitionProfile,\ ModifyLogicalPartitionProfile import sys import os #################### # LOGICAL PARTITON #################### directory = os.path.dirname(os.path.dirname(__file__)) def logicalpartition_children(n1, managedsystem_uuid, ip, x_api_session): """ This function provides a detailed view of the Logical Partitions Args: n1 : variable for client selected choices managedsystem_uuid : The unique id of the Managed system ip: ip address of hmc x_api_session : session to be used """ os.system("cls") n = n1 if n == 1: #Logical Partition operations while True: print ("\n\n","LogicalPartition operations".center(50)) print_list = ['List','Create','Delete','Poweron', 'Poweroff','Modify','Return to LogicalPartition Menu', 'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit'] #select any one Logical partition operation x = int(print_obj.print_on_screen(print_list)) listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = listlogicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) if x == 1: # object creation and method call to List Logical Partition print("\nAvailable LogicalPartitions :") selected_logicalpartition_object = get_selectedobject(object_list) if selected_logicalpartition_object != None: listlogicalpartition_object.print_logicalpartition_attributes(selected_logicalpartition_object) elif x == 2: #object creation and method call to create Logicalpartition try: print("\nLogical Partition will be created with Following configruations,\n maximum,mimimum and desired memory = 256", "\nShared processors,Minimum,Desired and maximum processing units = 0.5,\npartition type = AIX/Linux") logicalpartition_object = CreatePartition.CreatePartition("LogicalPartition") created_logicalpartition_object = logicalpartition_object.create_Partition(ip, managedsystem_uuid, x_api_session) print("\nPartition %s Created Successfully\n"%(created_logicalpartition_object.PartitionName.value())) listlogicalpartition_object.print_logicalpartition_attributes(created_logicalpartition_object) except 
(TypeError,AttributeError) : log_object.log_error("Error in lpar creation") elif x == 3: #object creation and method call to delete Logical partition selected_logicalpartition_object = None print("\nAvailable LogicalPartitions :") selected_logicalpartition_object = get_selectedobject(object_list) if selected_logicalpartition_object != None: logicalpartition_object = DeleteLogicalPartition.DeleteLogicalPartition() logicalpartition_object.delete_LogicalPartition(ip, managedsystem_uuid, selected_logicalpartition_object, x_api_session) elif x == 4: #object creation and method call to Poweron Logical partition listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = listlogicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) print("\nList of Partitions in inactive state") k = 0 inactive_object_list = [] for i in range(0,len(object_list)): if object_list[i].PartitionState.value() == "not activated": k = k+1 print("%s.%s" % (k,object_list[i].PartitionName.value())) inactive_object_list.append(object_list[i]) if k>0: try: c = int(input("\nSelect any partition index the operation to be performed:")) if c > 0: ch = c-1 selected_logicalpartition_object = inactive_object_list[ch] logicalpartition_object = PowerOnPartition.PowerOnPartition("LogicalPartition") logicalpartition_object.poweron_Partition(ip, selected_logicalpartition_object, x_api_session) else : print("\nTry again using valid option") except IndexError : print("\nTry again using valid option") else: log_object.log_warn("No Partitions are in inactive state") elif x == 5: #object creation and method call to Poweroff Logical Partition listlogicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = listlogicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) print("\nList of Partitions in active state") k = 0 active_object_list = [] for i in range(0,len(object_list)): if object_list[i].PartitionState.value() == "open firmware" or object_list[i].PartitionState.value() == "running": k = k+1 print("%s.%s" % (k,object_list[i].PartitionName.value())) active_object_list.append(object_list[i]) if k>0 : try: c = int(input("\nSelect any partition index the operation to be performed:")) if c > 0: ch = c-1 selected_logicalpartition_object = active_object_list[ch] logicalpartition_object = PowerOffPartition.PowerOffPartition("LogicalPartition") logicalpartition_object.poweroff_Partition(ip, selected_logicalpartition_object, x_api_session) else: print("\nTry again using valid option") except IndexError : print("\nTry again using valid option") else: log_object.log_warn("No Partitions are in active state") elif x == 6: #object creation and method call to Modify Logical Partition print("\nAvailable LogicalPartitions :") selected_logicalPartition_object = get_selectedobject(object_list) if selected_logicalpartition_object != None: print("\nLogical partition memory attributes are modified as maximum ,minimum ,desired memory = 512") modify_logicalpartition_object = ModifyPartition.ModifyPartition("LogicalPartition") result = modify_logicalpartition_object.modify_Partition(ip,selected_logicalPartition_object,x_api_session) if result: print("\nModifications are updated successfully") else: log_object.log_error("Error occured while updating the modifications.Verify \ whether the partitions are in running or not activated state before updating it") elif x == 7: os.system("cls") return 1 elif x == 8: os.system("cls") return 2 elif x == 9: 
os.system("cls") return 3 elif x == 10: print(open(directory+"/help/LogicalPartition/LogicalPartitionOperations.txt").read()) elif x == 11: sys.exit(1) else: print("\nTry again using valid option") back_to_menu() elif n == 2: #LogicalPartition Profile operations while True: print ("\n\n","LogicalPartition Profile".center(50)) print_list = ['List','Create', 'Modify','Return to LogicalPartition Menu', 'Return to ManagedSystem Menu','Return to MainMenu', 'Help','Exit'] #select any one LogicalPartitionProfile operation x1 = int(print_obj.print_on_screen(print_list)) try: if x1 > 0 and x1 < 4: print("\nAvailable LogicalPartitions :") logicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = logicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) list_logicalpartitionprofile_object = ListLogicalPartitionProfile.\ ListLogicalPartitionProfile("LogicalPartition") selected_logicalpartition_object=get_selectedobject(object_list) if x1 == 1: # object creation and method call to list all profiles for selected LPAR if selected_logicalpartition_object != None: partition_id =selected_logicalpartition_object.PartitionUUID.value() profile_object_list = list_logicalpartitionprofile_object.\ list_LogicalPartitionProfile(ip,partition_id, x_api_session) for i in range(0,len(profile_object_list)): list_logicalpartitionprofile_object.\ print_logicalpartitionprofile_attributes(profile_object_list[i]) elif x1 == 2: # object creation and method call to create LPAR Profile if selected_logicalpartition_object != None: print("\nLogical Partition profile will be created with Following configruations,", "\n maximum,mimimum and desired memory = 256", "\nprofile type = REG_LPAR_PROFILE_TYPE") create_logicalpartitionprofile_object = CreateLogicalPartitionProfile.\ CreateLogicalPartitionProfile("LogicalPartition") created_logicalpartitionprofile_object = create_logicalpartitionprofile_object.\ create_LogicalPartitionProfile(ip, selected_logicalpartition_object, x_api_session) if created_logicalpartitionprofile_object != None : print("\nProfile %s Created Successfully\n"%(created_logicalpartitionprofile_object.ProfileName.value())) list_logicalpartitionprofile_object.\ print_logicalpartitionprofile_attributes(created_logicalpartitionprofile_object) elif x1 == 3: # object creation and method call to Modify selected Profile if selected_logicalpartition_object != None: partition_id =selected_logicalpartition_object.PartitionUUID.value() profile_object_list = list_logicalpartitionprofile_object.\ list_LogicalPartitionProfile(ip,partition_id, x_api_session) print("\nAvailable LogicalPartitionProfile:") for i in range(0,len(profile_object_list)): print("%s.%s"%(i+1,profile_object_list[i].ProfileName.value())) try: ch=int(input("\nselect any profile index to modify :")) print("\nLogical partition profile memory attributes are modified as maximum ,minimum ,desired memory = 512") modify_logicalpartitionprofile_object = ModifyLogicalPartitionProfile.\ ModifyLogicalPartitionProfile("LogicalPartition") modify_bool = modify_logicalpartitionprofile_object.\ modify_LogicalPartitionProfile(ip, partition_id, profile_object_list[ch-1], x_api_session) if modify_bool: print("\nUpdations to the profile are made Successfully") else: log_object.log_error("\nError occured while updating") except IndexError : print("\nTry again using valid option") elif x1 == 4: os.system("cls") return 1 elif x1 == 5: os.system("cls") return 2 elif x1 == 6: os.system("cls") return 3 elif x1 == 7: 
print(open(directory+"/help/LogicalPartitionProfile.txt").read()) elif x1 == 8: sys.exit(1) else: print("\nTry again using valid option") back_to_menu() except IndexError : log_object.log_warn("No LogicalPartition Available") back_to_menu() elif n == 3: #ClientNetworkAdapter operations while True: print ("\n\n","ClientNetworkAdapter".center(50)) print_list = ['List','Create','Return to LogicalPartition Menu', 'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit'] #select any ClientNetworkAdapter operation x1 = int(print_obj.print_on_screen(print_list)) if x1 > 0 and x1 < 3 : print("\nAvailable LogicalPartitions :") logicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = logicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) selected_logicalpartition_object=get_selectedobject(object_list) if x1 == 1: #object creation and method call to list all client network adapters available in th selected LPAR if selected_logicalpartition_object != None: logicalpartition_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() list_clientnetwork_adapter_object = ListClientNetworkAdapter.ListClientNetworkAdapter() clientnetwork_adapter_list = list_clientnetwork_adapter_object.\ list_clientnetwork_adapter(ip, logicalpartition_id, x_api_session) try: for clientnetwork_adapter in clientnetwork_adapter_list: list_clientnetwork_adapter_object.print_clientnetwork_adapter_attributes(clientnetwork_adapter) except TypeError: log_object.log_warn("\nNo ClientNetworkAdapters are Available") elif x1 == 2: #object creation and method call to create client network adapter if selected_logicalpartition_object != None: logicalpartition_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() client_networkadapter_object = CreateClientNetworkAdapter.\ CreateClientNetworkAdapter() client_networkadapter_object.create_clientnetwork_adapter(ip, logicalpartition_id , x_api_session) elif x1 == 3: os.system("cls") return 1 elif x1 == 4: os.system("cls") return 2 elif x1 == 5: os.system("cls") return 3 elif x1 == 6: print(open(directory+"/help/LogicalPartition/ClientNetworkAdapter.txt").read()) elif x1 == 7: sys.exit(1) else: print("\nTry again using valid option") back_to_menu() elif n == 4: #virtual scsi adapter operations while True: print ("\n\n","VirtualSCSIClientAdapter".center(50)) print_list = ['List','Create','Return to LogicalPartition Menu', 'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit'] #select any VirtualSCSIClientAdapter operation x1 = int(print_obj.print_on_screen(print_list)) if x1 > 0 and x1 < 3: print("\nAvailable LogicalPartitions :") logicalpartition_object = ListLogicalPartition.\ ListLogicalPartition() object_list = logicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) selected_logicalpartition_object=get_selectedobject(object_list) if x1 == 1: #object creation and method call to list all virtual scsi adapters in the selected lpar if selected_logicalpartition_object != None: lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() vscsi_list_object = ListVirtualSCSIClientAdapter.\ ListVirtualSCSIClientAdapter() object_list = vscsi_list_object.list_virtualscsi_clientadapter(ip, lpar_id, x_api_session) if object_list != None: print("\nDetails of Available VirtualSCSIClientAdapters :", "\n--------------------------------------------------") for i in range(0,len(object_list)): vscsi_list_object.print_vscsi_attributes(object_list[i]) else : 
log_object.log_warn("There are No VirtualSCSIClientAdapters in the selected LogicalPartition") elif x1 == 2: #object creation and method call to create a virtual scsii adapter in the selected lpar if selected_logicalpartition_object != None: lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() vscsi_create_object = CreateVirtualSCSIClientAdapter.\ CreateVirtualSCSIClientAdapter() vscsi_create_object.create_vscsi_clientadapter(ip, lpar_id, x_api_session) elif x1 == 3: os.system("cls") return 1 elif x1 == 4: os.system("cls") return 2 elif x1 == 5: os.system("cls") return 3 elif x1 == 6: print(open(directory+"/help/LogicalPartition/VirtualSCSIAdapter.txt").read()) elif x1 == 7: sys.exit(1) else: print("\nTry again using valid option") back_to_menu() elif n == 5: while True: #virtual fibre channel adapter operations print ("\n\n","VirtualFibreChannelClientAdapter".center(50)) print_list = ['List','Create','Return to LogicalPartition Menu', 'Return to ManagedSystem Menu','Return to MainMenu','Help','Exit'] #select any VirtualFibreChannelClientAdapter operation x1 = int(print_obj.print_on_screen(print_list)) if x1 > 0 and x1 < 3 : print("\nAvailable LogicalPartitions :") logicalpartition_object = ListLogicalPartition.\ ListLogicalPartition() object_list = logicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) selected_logicalpartition_object=get_selectedobject(object_list) if x1 == 1: # object creation and method call to list all virtual fibre channel adapters in the selected lpar if selected_logicalpartition_object != None: lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() vfc_list_object = ListVirtualFibreChannelClientAdapter.\ ListVirtualFibreChannelClientAdapter() object_list = vfc_list_object.\ list_virtualfibrechannel_clientadapter(ip, lpar_id, x_api_session) if object_list != None: print("\nDetails of Available VirtualFibreChannelClientAdapters :", "\n------------------------------------------------------") for i in range(0,len(object_list)): vfc_list_object.print_virtualfibrechannel_attributes(object_list[i]) else : log_object.log_warn("There are No VirtualFibreChannelClientAdapters in the selected LogicalPartition") elif x1 == 2: #object creation and method call to create virtual fibre channel adapter in the selected lpar if selected_logicalpartition_object != None: lpar_id = selected_logicalpartition_object.Metadata.Atom.AtomID.value() virtualfibrechannel_create_object = CreateVirtualFibreChannelClientAdapter.\ CreateVirtualFibreChannelClientAdapter() virtualfibrechannel_create_object.create_virtualfibrechannel_clientadapter(ip, lpar_id, x_api_session) elif x1 == 3: os.system("cls") return 1 elif x1 == 4: os.system("cls") return 2 elif x1 == 5: os.system("cls") return 3 elif x1 == 6: print(open(directory+"/help/LogicalPartition/VirtualFibreChannelAdapter.txt").read()) elif x1 == 7: sys.exit(1) else: print("\nTry again using valid option") back_to_menu() elif n == 6: #SRIOV ethernet Logical Port operations while True: print ("\n\n","SRIOV Ethernet Logical Port".center(50)) print_list = ['List','Create','Clear Statistics','Modify', 'Return to LogicalPartition Menu', 'Return to ManagedSystem Menu', 'Return to MainMenu','Help','Exit'] x1 = int(print_obj.print_on_screen(print_list)) if x1 > 0 and x1 < 5 : print("\nAvailable LogicalPartitions :") logicalpartition_object = ListLogicalPartition.ListLogicalPartition() object_list = logicalpartition_object.\ list_LogicalPartition(ip, managedsystem_uuid, x_api_session) 
selected_logicalpartition_object = get_selectedobject(object_list) if selected_logicalpartition_object != None: lpar_uuid = selected_logicalpartition_object.Metadata.Atom.AtomID.value() if x1 == 1: #object creation and method call to list all SRIOV Ethernet Logical Port if selected_logicalpartition_object != None: sriov_logicalPort = ListSRIOVLogicalPort.\ ListSRIOVLogicalPort() sriov_list = sriov_logicalPort.list_sriov_logical_port(ip, lpar_uuid, x_api_session) try: for i in range(0,len(sriov_list)): sriov_logicalPort.print_sriov_logical_port(sriov_list[i]) except (TypeError , AttributeError)as e: log_object.log_warn("No SRIOV LogicalPorts are available") elif x1 == 2: #object creation and method call to create SRIOV Ethernet Logical Port if selected_logicalpartition_object != None: create_sriov_logicalport = CreateSRIOVLogicalPort.\ CreateSRIOVLogicalPort() create_sriov_logicalport.create_sriov_logicalport(ip, lpar_uuid, x_api_session) elif x1 == 3: #object creation and method call to Clear statistics of SRIOV Ethernet Logical Port if selected_logicalpartition_object != None: sriov_logicalPort = ListSRIOVLogicalPort.ListSRIOVLogicalPort() sriov_list = sriov_logicalPort.list_sriov_logical_port(ip, lpar_uuid, x_api_session) try: for i in range(0,len(sriov_list)): print("%s.ConfigurationID %s"%(i+1,sriov_list[i].ConfigurationID.value())) ch = int(input("Select a SRIOV Port to clear statistics :")) if ch > 0 and ch <= len(sriov_list): sriov_uuid = sriov_list[ch-1].Metadata.Atom.AtomID.value() clear_statistics = ClearSRIOVLogicalPortStatistics.\ ClearSRIOVLogicalPortStatistics() clear_statistics.clear_sriov_logicalport_statistics(ip, lpar_uuid, sriov_uuid, x_api_session) else: print("\nTry again using valid option") except (TypeError , AttributeError)as e: log_object.log_warn("No SRIOV LogicalPorts are available") elif x1 == 4: #object creation and method call to modify SRIOV Ethernet Logical Port if selected_logicalpartition_object != None: sriov_logicalPort = ListSRIOVLogicalPort.ListSRIOVLogicalPort() sriov_list = sriov_logicalPort.list_sriov_logical_port(ip, lpar_uuid, x_api_session) try: for i in range(0,len(sriov_list)): print("%s.ConfigurationID %s"%(i+1,sriov_list[i].ConfigurationID.value())) ch = int(input("Select a SRIOV Port to modify :")) if ch > 0 and ch <= len(sriov_list): modify_sriov = ModifySRIOVEthernetLogicalPort.ModifySRIOVEthernetLogicalPort() modify_sriov.modify_sriov_logicalport(ip, lpar_uuid, sriov_list[ch-1], x_api_session) except (TypeError , AttributeError)as e: log_object.log_warn("No SRIOV LogicalPorts are available") print(e) elif x1 == 5: os.system("cls") return 1 elif x1 == 6: os.system("cls") return 2 elif x1 == 7: os.system("cls") return 3 elif x1 == 8: print(open(directory+"/help/LogicalPartition/SRIOVEthernetLogicalPort.txt").read()) elif x1 == 9 : sys.exit(1) else: print("\nTry again using valid option") back_to_menu()
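# ---------------------------------------------------------------------------
# Hedged refactoring sketch (not part of the sample): the power-on and
# power-off branches above repeat the same "filter partitions by state, print
# an indexed list, read a selection" steps. A helper along these lines could
# factor that out; the states and accessors mirror the objects used above.

def select_partition_by_state(object_list, states):
    """Return the partition the user picks among those whose PartitionState is
    in `states`, or None when nothing matches or the input is invalid."""
    candidates = [obj for obj in object_list
                  if obj.PartitionState.value() in states]
    for k, obj in enumerate(candidates, start=1):
        print("%s.%s" % (k, obj.PartitionName.value()))
    if not candidates:
        return None
    try:
        choice = int(input("\nSelect any partition index the operation to be performed:"))
        if choice > 0:
            return candidates[choice - 1]
        print("\nTry again using valid option")
    except (ValueError, IndexError):
        print("\nTry again using valid option")
    return None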
"""Preliminary socket module. XXX Restrictions: - Only INET sockets - No asynchronous behavior - No socket options - Can't do a very good gethostbyaddr() right... """ import java.net import org.python.core import jarray import string __all__ = ['AF_INET', 'SOCK_DGRAM', 'SOCK_RAW', 'SOCK_RDM', 'SOCK_SEQPACKET', 'SOCK_STREAM', 'SocketType', 'error', 'getfqdn', 'gethostbyaddr', 'gethostbyname', 'gethostname', 'socket'] error = IOError AF_INET = 2 SOCK_DGRAM = 1 SOCK_STREAM = 2 SOCK_RAW = 3 # not supported SOCK_RDM = 4 # not supported SOCK_SEQPACKET = 5 # not supported SOL_SOCKET = 0xFFFF SO_REUSEADDR = 4 def _gethostbyaddr(name): # This is as close as I can get; at least the types are correct... addresses = java.net.InetAddress.getAllByName(gethostbyname(name)) names = [] addrs = [] for addr in addresses: names.append(addr.getHostName()) addrs.append(addr.getHostAddress()) return (names, addrs) def getfqdn(name=None): """ Return a fully qualified domain name for name. If name is omitted or empty it is interpreted as the local host. To find the fully qualified name, the hostname returned by gethostbyaddr() is checked, then aliases for the host, if available. The first name which includes a period is selected. In case no fully qualified domain name is available, the hostname is retur New in version 2.0. """ if not name: name = gethostname() names, addrs = _gethostbyaddr(name) for a in names: if a.find(".") >= 0: return a return name def gethostname(): return java.net.InetAddress.getLocalHost().getHostName() def gethostbyname(name): return java.net.InetAddress.getByName(name).getHostAddress() def gethostbyaddr(name): names, addrs = _gethostbyaddr(name) return (names[0], names, addrs) def socket(family, type, flags=0): assert family == AF_INET assert type in (SOCK_DGRAM, SOCK_STREAM) assert flags == 0 if type == SOCK_STREAM: return _tcpsocket() else: return _udpsocket() class _tcpsocket: sock = None istream = None ostream = None addr = None server = 0 file_count = 0 reuse_addr = 0 def bind(self, addr, port=None): if port is not None: addr = (addr, port) assert not self.sock assert not self.addr host, port = addr # format check self.addr = addr def listen(self, backlog=50): "This signifies a server socket" assert not self.sock self.server = 1 if self.addr: host, port = self.addr else: host, port = "", 0 if host: a = java.net.InetAddress.getByName(host) self.sock = java.net.ServerSocket(port, backlog, a) else: self.sock = java.net.ServerSocket(port, backlog) if hasattr(self.sock, "setReuseAddress"): self.sock.setReuseAddress(self.reuse_addr) def accept(self): "This signifies a server socket" if not self.sock: self.listen() assert self.server sock = self.sock.accept() host = sock.getInetAddress().getHostName() port = sock.getPort() conn = _tcpsocket() conn._setup(sock) return conn, (host, port) def connect(self, addr, port=None): "This signifies a client socket" if port is not None: addr = (addr, port) assert not self.sock host, port = addr if host == "": host = java.net.InetAddress.getLocalHost() self._setup(java.net.Socket(host, port)) def _setup(self, sock): self.sock = sock if hasattr(self.sock, "setReuseAddress"): self.sock.setReuseAddress(self.reuse_addr) self.istream = sock.getInputStream() self.ostream = sock.getOutputStream() def recv(self, n): assert self.sock data = jarray.zeros(n, 'b') m = self.istream.read(data) if m <= 0: return "" if m < n: data = data[:m] return data.tostring() def send(self, s): assert self.sock n = len(s) self.ostream.write(s) return n def getsockname(self): if not 
self.sock: host, port = self.addr or ("", 0) host = java.net.InetAddress.getByName(host).getHostAddress() else: if self.server: host = self.sock.getInetAddress().getHostAddress() else: host = self.sock.getLocalAddress().getHostAddress() port = self.sock.getLocalPort() return (host, port) def getpeername(self): assert self.sock assert not self.server host = self.sock.getInetAddress().getHostAddress() port = self.sock.getPort() return (host, port) def setsockopt(self, level, optname, value): if optname == SO_REUSEADDR: self.reuse_addr = value def getsockopt(self, level, optname): if optname == SO_REUSEADDR: return self.reuse_addr def makefile(self, mode="r", bufsize=-1): file = None if self.istream: if self.ostream: file = org.python.core.PyFile(self.istream, self.ostream, "<socket>", mode) else: file = org.python.core.PyFile(self.istream, "<socket>", mode) elif self.ostream: file = org.python.core.PyFile(self.ostream, "<socket>", mode) else: raise IOError, "both istream and ostream have been shut down" if file: return _tcpsocket.FileWrapper(self, file) class FileWrapper: def __init__(self, socket, file): self.socket = socket self.sock = socket.sock self.istream = socket.istream self.ostream = socket.ostream self.file = file self.read = file.read self.readline = file.readline self.readlines = file.readlines self.write = file.write self.writelines = file.writelines self.flush = file.flush self.seek = file.seek self.tell = file.tell self.socket.file_count += 1 def close(self): if self.file.closed: # Already closed return self.socket.file_count -= 1 self.file.close() if self.socket.file_count == 0 and self.socket.sock == 0: # This is the last file Only close the socket and streams # if there are no outstanding files left. if self.sock: self.sock.close() if self.istream: self.istream.close() if self.ostream: self.ostream.close() def shutdown(self, how): assert how in (0, 1, 2) assert self.sock if how in (0, 2): self.istream = None if how in (1, 2): self.ostream = None def close(self): sock = self.sock istream = self.istream ostream = self.ostream self.sock = 0 self.istream = 0 self.ostream = 0 # Only close the socket and streams if there are no # outstanding files left. if self.file_count == 0: if istream: istream.close() if ostream: ostream.close() if sock: sock.close() class _udpsocket: def __init__(self): self.sock = None self.addr = None def bind(self, addr, port=None): if port is not None: addr = (addr, port) assert not self.sock host, port = addr if host == "": self.sock = java.net.DatagramSocket(port) else: a = java.net.InetAddress.getByName(host) self.sock = java.net.DatagramSocket(port, a) def connect(self, addr, port=None): if port is not None: addr = (addr, port) host, port = addr # format check assert not self.addr if not self.sock: self.sock = java.net.DatagramSocket() self.addr = addr # convert host to InetAddress instance? 
    def sendto(self, data, addr):
        n = len(data)
        if not self.sock:
            self.sock = java.net.DatagramSocket()
        host, port = addr
        bytes = jarray.array(map(ord, data), 'b')
        a = java.net.InetAddress.getByName(host)
        packet = java.net.DatagramPacket(bytes, n, a, port)
        self.sock.send(packet)
        return n

    def send(self, data):
        assert self.addr
        # pass the payload through to sendto(); the original call dropped `data`
        return self.sendto(data, self.addr)

    def recvfrom(self, n):
        assert self.sock
        bytes = jarray.zeros(n, 'b')
        packet = java.net.DatagramPacket(bytes, n)
        self.sock.receive(packet)
        host = packet.getAddress().getHostName()
        port = packet.getPort()
        m = packet.getLength()
        if m < n:
            bytes = bytes[:m]
        return bytes.tostring(), (host, port)

    def recv(self, n):
        assert self.sock
        bytes = jarray.zeros(n, 'b')
        packet = java.net.DatagramPacket(bytes, n)
        self.sock.receive(packet)
        m = packet.getLength()
        if m < n:
            bytes = bytes[:m]
        return bytes.tostring()

    def getsockname(self):
        assert self.sock
        host = self.sock.getLocalAddress().getHostName()
        port = self.sock.getLocalPort()
        return (host, port)

    def getpeername(self):
        assert self.sock
        host = self.sock.getInetAddress().getHostName()
        port = self.sock.getPort()
        return (host, port)

    def __del__(self):
        self.close()

    def close(self):
        sock = self.sock
        self.sock = 0
        sock.close()


SocketType = _tcpsocket


def test():
    s = socket(AF_INET, SOCK_STREAM)
    s.connect(("", 80))
    s.send("GET / HTTP/1.0\r\n\r\n")
    while 1:
        data = s.recv(2000)
        print data
        if not data:
            break


if __name__ == '__main__':
    test()
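# ---------------------------------------------------------------------------
# Usage sketch (not part of the original module): test() above exercises the
# TCP path; this shows the analogous datagram flow through _udpsocket. The
# port number is arbitrary and both endpoints run in the same process, so
# recvfrom() blocks only until the datagram loops back.

def test_udp(port=9999):
    receiver = socket(AF_INET, SOCK_DGRAM)
    receiver.bind(("", port))                   # wraps java.net.DatagramSocket(port)
    sender = socket(AF_INET, SOCK_DGRAM)
    sender.sendto("ping", ("localhost", port))  # one datagram, no connection
    data, addr = receiver.recvfrom(2000)
    print data, addr
    sender.close()
    receiver.close()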
from matplotlib import pyplot
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.pyplot import cm
import numpy as np
from sklearn.cluster import KMeans
from sklearn.cluster import MiniBatchKMeans
import time
from scipy.sparse import csr_matrix
from scipy.optimize import nnls
from sklearn.preprocessing import normalize
import logging
import sys


def visualize_datapoints(X, y, title=""):
    d = X.shape[1]
    assert d == 2 or d == 3, "only 2/3-D datapoints can be visualized"
    fig = pyplot.figure()
    if d == 3:
        ax = fig.add_subplot(111, projection='3d')
        ax.scatter(X[:, 0], X[:, 1], X[:, 2], c=y)
    if d == 2:
        ax = fig.add_subplot(111)
        ax.scatter(X[:, 0], X[:, 1], c=y)
    ax.set_title(title)
    fig.show()


def visualize_anchors(X, A):
    '''visualize the anchors'''
    fig = pyplot.figure()
    ax = fig.add_subplot(111, projection='3d')
    ax.set_title("Anchors")
    ax.scatter(X[:, 0], X[:, 1], X[:, 2], alpha=0.1)
    ax.scatter(A[:, 0], A[:, 1], A[:, 2], s=60, c='r', marker='^')
    fig.show()


def visualize_edges(X, A, Z, threshold, title=""):
    '''Visualize the unweighted instance-anchor edges

    Example:
        tools.visualize_edges(X, A, Z, 1e-6, alg)
    '''
    d = X.shape[1]
    assert d == 2 or d == 3, "only 2/3-D edges can be visualized"
    links = np.where(Z > threshold)
    # source and target vertices
    s = X[links[0], :]
    t = A[links[1], :]
    fig = pyplot.figure()
    color = cm.rainbow(np.linspace(0, 1, A.shape[0]))
    if d == 3:
        ax = fig.add_subplot(111, projection='3d')
        ax.view_init(10, -75)
        edge = lambda i: ([s[i, 0], t[i, 0]], [s[i, 1], t[i, 1]], [s[i, 2], t[i, 2]])
    if d == 2:
        ax = fig.add_subplot(111)
        edge = lambda i: ([s[i, 0], t[i, 0]], [s[i, 1], t[i, 1]])
    for i in xrange(s.shape[0]):
        ax.plot(*edge(i), c=color[links[1][i], :], alpha=0.6)
    ax.set_title(title)
    fig.show()


def visualize_eigenvectors(U, k):
    fig = pyplot.figure()
    ax = fig.add_subplot(111)
    for j in xrange(k):
        ax.plot(U[:, j])
    fig.show()


def random_data_split(n, n_labeled, n_trials):
    ls, us = [], []
    for trial in xrange(n_trials):
        l = np.random.choice(n, n_labeled, replace=False)
        ls.append(l)
        u = np.setdiff1d(np.arange(n), l)
        us.append(u)
    return ls, us


def print_formated_results(r):
    if len(r) > 0:
        print "\nAccuracy:"
        for alg in r.keys():
            print '%10s' % alg,
        print
        n_trials = len(r.values()[0])
        for trial in xrange(n_trials):
            for alg in r.keys():
                print '%10.2f' % r[alg][trial],
            print
        print "\nMean Accuracy:"
        for alg in r.keys():
            print '%10s' % alg,
        print
        for alg in r.keys():
            print '%10.2f' % np.mean(r[alg]),
        print


def kmeans_centroids(X, n_clusters):
    t_start = time.time()
    km = MiniBatchKMeans(n_clusters=n_clusters,
                         init='k-means++', max_iter=5,
                         init_size=2 * n_clusters, batch_size=500).fit(X)
    # A = km.cluster_centers_
    # A = split_by_spatial_tree(X, n_clusters)
    t_elapsed = time.time() - t_start
    print 'kmeans: %.3f secs' % t_elapsed
    return km


def split_by_spatial_tree(X, n_anchors):
    '''Data partitioning via spatial trees

    Dependency: http://cseweb.ucsd.edu/~naverma/SpatialTrees/index.html

    Args:
        X: matrix of data points
    Returns:
        A: centroids for each spatial partitioning
    '''
    from spatialtree import spatialtree
    height = np.log2(n_anchors)
    height_int = np.int(height)
    if height_int != height:
        print "number of anchors is not power of 2"
    T = spatialtree(X, rule='rp', height=height_int, spill=0.0, min_items=1)
    A = np.zeros((n_anchors, X.shape[1]))
    c = 0
    for t in T.traverse():
        if t.isLeaf():
            indices = [index for index in t.__iter__()]
            A[c, :] = np.average(X[indices, :], axis=0)
            c = c + 1
    return A


def locally_anchor_embedding(X, A, idx):
    '''Locally Anchor Embedding

    Args:
        X: matrix of data points
        A: matrix of anchors
        idx: mapping from each element in X to anchor indices
    Returns:
        Z: transition probability from X to A
    '''
    # Warning: no regularization is imposed over Z
    # Warning: this is a simplified heuristic solution different from the original paper
    beta = np.vstack([nnls(A[idx[i, :], :].T, X[i, :])[0]
                      for i in xrange(X.shape[0])])
    Z = normalize(beta, axis=1, norm='l1')
    return Z


def reduced_sml(Z, l, Yl, gamma):
    '''dimension-reduced semi-supervised learning

    Please refer to the anchor graph paper for more details.
    '''
    Lambda_inv = np.diag(1. / Z.sum(axis=0))
    # sparse operations are crucial for large Z
    Z_sparse = csr_matrix(Z)
    ZZ_sparse = Z_sparse.T.dot(Z_sparse)
    ZZ = ZZ_sparse.toarray()
    L_tilde = ZZ - ZZ.dot(Lambda_inv).dot(ZZ)
    Zl = Z[l, :]
    A = np.linalg.lstsq(Zl.T.dot(Zl) + gamma * L_tilde, Zl.T)[0].dot(Yl)
    return Z.dot(A)


def get_logger(logfile):
    logger = logging.getLogger(sys.argv[0])
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(message)s')
    ch = logging.StreamHandler(sys.stdout)
    ch.setFormatter(formatter)
    fh = logging.FileHandler(logfile)
    fh.setFormatter(formatter)
    logger.addHandler(ch)
    logger.addHandler(fh)
    return logger
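# ---------------------------------------------------------------------------
# Usage sketch (illustrative, not part of the module): wires the helpers
# above into a minimal anchor-graph semi-supervised learning run.  The
# synthetic blobs, the brute-force s-nearest-anchor search, the scattering of
# the (n, s) weights into a full (n, m) matrix and the empty-column guard are
# assumptions made for this example; only kmeans_centroids,
# locally_anchor_embedding, random_data_split and reduced_sml come from above.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    from sklearn.datasets import make_blobs

    n, n_anchors, s, gamma = 1000, 16, 3, 0.01
    X, y = make_blobs(n_samples=n, centers=3, n_features=2, random_state=0)

    # anchors = mini-batch k-means centroids
    km = kmeans_centroids(X, n_anchors)
    A = km.cluster_centers_

    # for every point keep its s closest anchors (brute-force distances)
    dist = ((X[:, None, :] - A[None, :, :]) ** 2).sum(axis=2)
    idx = np.argsort(dist, axis=1)[:, :s]

    # local weights over the s anchors, scattered into the full (n, m) matrix
    Z_s = locally_anchor_embedding(X, A, idx)
    Z = np.zeros((n, n_anchors))
    Z[np.arange(n)[:, None], idx] = Z_s
    Z = Z[:, Z.sum(axis=0) > 0]      # drop anchors no point ever uses

    # label a small random subset and propagate through the anchor graph
    ls, us = random_data_split(n, 30, 1)
    l, u = ls[0], us[0]
    Yl = np.eye(y.max() + 1)[y[l]]   # one-hot labels of the labeled points
    F = reduced_sml(Z, l, Yl, gamma)
    pred = F.argmax(axis=1)
    print 'accuracy on unlabeled points: %.3f' % np.mean(pred[u] == y[u])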
#-*- coding: utf-8 -*- """ . ,-. ,-. ,-. ,-. ,-. | * | | ,-| | `-. |-' | | |-' `-^ ' `-' `-' `' ' | parseli ~~~~~~~ Crawler and Parser for LinkedIn which retrieves and consumes HTML pages, extracts key values and converts them to dict (json) output :copyright: (c) 2012 by Mek :license: BSD, see LICENSE for more details. """ import json import requests from operator import add from BeautifulSoup import BeautifulSoup from utils import Storage # String templates for BeautifulSoup element selecting EMPLOY_SEC_CLS = 'position {} experience vevent vcard summary-{}' EDU_SEC_CLS = 'position {} education vevent vcard' PROFILE_URL = "http://linkedin.com/profile?id={}" def getli(url, raw=False, user_agent=('User-agent', 'Mozilla 3.10'), proxies=None): """Get LinkedIn: json results for any linkedin url params: raw - False if desired output is a python dict, True if raw json dump """ # allow search by li username if '/' not in url: username = url url = 'http://linkedin.com/in/%s' % username if 'https://' in url: url = url.replace('https://', 'http://') if 'http://' not in url: url = 'http://%s' % url soup = crawli(url, user_agent=user_agent, proxies=proxies) parsely = parseli(soup, raw=raw) return parsely def crawli(url, user_agent=('User-agent', 'Mozilla 3.10'), proxies=None): """Crawl LinkedIn: Returns html soup for any linkedin url""" url = url.replace('https://', 'http://') r = requests.get(url, headers=dict([user_agent]), proxies=proxies) html = r.content soup = BeautifulSoup(html) return soup def parseli(soup, raw=False): """Parse LinkedIn: Scrapes, scrubs, and returns a dictionary of key LinkedIn data # TODO: Extend profile to include interests + Viewers """ profile = Storage({ "id": '', "avatar": '', "url": '', "name": {}, "location": {}, "headline": '', "industry": '', "viewers": [], "employment": [], "education": [], "connections": '', "summary": '', "skills": [] }) def meta(profile): """gets metadata like unique_id, and profile url""" jstxt = str(soup.findAll('script')) def get_id(x): attempt_to_find_user_id = '' try: start_id = x.index("newTrkInfo = '") + 14 end_id = x.index(',', start_id) attempt_to_find_user_id = x[start_id:end_id] except: try: start_id = x.index("user_id: ") end_id = x.index(',', start_id) attempt_to_find_user_id = x[start_id:end_id] except: member_id = soup.findAll('div', {'class': 'masthead'}) if member_id: attempt_to_find_user_id = member_id[0]['id'] if 'member-' in attempt_to_find_user_id: return attempt_to_find_user_id.split('member-')[1] return attempt_to_find_user_id liid = get_id(jstxt) def get_url(): canonical_url = soup.findAll('link', {'rel': 'canonical'}) return canonical_url[0]['href'] if canonical_url \ else PROFILE_URL.format(liid) profile.id = liid profile.url = get_url() return profile def header(profile): """Parses the profile-header section +------------------------------------------+ | +-------+ given_name family_name | | | title [at institution] | | pic | locality [(area)] | Industry | | | | +-------+ """ header_sec = soup.findAll('div', {'class': 'profile-card vcard'}) if header_sec: header_sec = header_sec[0] avatar = header_sec.findAll('div', {'class': 'profile-picture'}) if avatar: profile.avatar = avatar[0].findAll('img')[0]['src'] demographic = soup.findAll('div', {"id": 'demographics'}) name = header_sec.findAll('span', {"class": "full-name"}) headline = header_sec.findAll("div", {"id": "headline-container"}) # Generally headline is of the form: "Title at Institution" if headline: profile.headline = headline[0].text if not profile.employment: 
if ' at ' in profile.headline: try: title, institution = profile.headline.split(' at ') profile["employment"].append({"institution": institution, "title": title}) except: pass if ' @ ' in profile.headline: try: title, institution = profile.headline.split(' @ ') profile["employment"].append({"institution": institution, "title": title}) except: pass if name: given_name = name[0].findAll('span', {'class': 'given-name'}) family_name = name[0].findAll('span', {'class': 'family-name'}) profile.name.update({ 'given-name': given_name[0].text if given_name else '', 'family-name': family_name[0].text if family_name else '', 'full-name': name[0].text }) # Fetch industry, location + area from header section if demographic: demos = demographic[0].findAll('dd') if demos: if len(demos) == 2: industry = demos[1].text profile.industry = industry try: location, area = demos[0].text.replace(")", "").split("(") except: location, area = demos[0].text, "" profile.location = {"locality": location, "area": area} return profile def overview(profile): """Parses the "Overview" section: The overview is used as a last resort to fill in any missing information which could not be obtained by the 'experience' (employment) and 'education' sections. The quality of information it provides is inferior to the aforementioned. given_name family_name's Overview --------------------------------- Current title at institution <0 or n> Past title at institution <0 or n> Education institution <0 or n> """ overview_sec = soup.findAll('dl', {'id': 'overview'}) if overview_sec: if not profile.employment: career_selectors = [\ overview_sec[0].findAll('div', {'class': 'summary-current'}), overview_sec[0].findAll('div', {'class': 'summary-past'}), overview_sec[0].findAll('div', {'class': 'past'}) ] # prune any selector which returns no results, i.e. 
[], are not lists career_lsts = filter(lambda x: type(x) is list, career_selectors) # if career_lsts contains any non empty lists if any(career_lsts): # reduce on list concat careers = reduce(add, [lst[0] for lst in career_lsts]) for career in careers: title, institution = str(career)[4:-5]\ .replace("\n", "").split('<span class="at">at </span>') profile["employment"].append({"institution": institution, "title": title}) if not profile.education: edu_subsec = overview_sec[0].findAll('dd', {'class': 'summary-education'}) if edu_subsec: edus = edu_subsec[0].findAll('li') for edu in edus: profile['education'].append({'summary': edu.text}) return profile def skillset(profile): skills_sec = soup.findAll('div', {'id':'profile-skills'}) if skills_sec: skills_sec = skills_sec[0] skills = skills_sec.findAll('li') for skill in skills: skill_name = skill.findAll('span', {'class':'endorse-item-name'}) if skill_name: profile['skills'].append(skill_name[0].text) return profile def employment(profile): """Parses the "Experience" section Notes: either dtstatus or dtend is present (exactly one of them) dtstamp signified 'Present' employee dtstamp is resolved to a binary value (1/0) for profile.current given_name family_name's Experience ----------------------------------- # employers <1 to n> title institution dtstart - [dtstamp|dtend] | location """ jobs = soup.findAll('div', {'id': 'background-experience-container'}) # If profile "Experience Section" exists if jobs: jobs = jobs[0] careers = jobs.findAll('div', {'class': 'editable-item section-item current-position'}) + \ jobs.findAll('div', {'class': 'editable-item section-item past-position'}) for career in careers: title = career.h4.text potential_institutions = career.findAll('h5') institution = potential_institutions[len(potential_institutions) - 1] location = career.findAll("span", {'class': 'locality'}) description = career.findAll("p", {'class': 'description summary-field-show-more'}) time_period = career.findAll('span', {'class':'experience-date-locale'})[0].findAll('time') dtstart = False dtend = False if time_period: dtstart = time_period[0].text if len(time_period) > 1: dtend = time_period[1].text job = {"title": title if title else '', "institution": institution.text if institution else '', "current": 1 if dtend and 'Present' in dtend else 0, "location": location[0].text if location else '', "description": description[0].text if description else '', "date": { "start": dtstart if dtstart else '', "end": dtend if dtend else '' } } profile["employment"].append(job) return profile def education(profile): """Parses the "Education" section""" section_edu = soup.findAll('div', {'id': 'background-education'}) if section_edu: section_edu = section_edu[0] edus = section_edu.findAll("div", {"class": 'editable-item section-item'}) for school in edus: institution = school.h4.text degree = school.findAll('span', {'class': 'degree'}) major = school.findAll('span', {'class': 'major'}) edu_dates = school.findAll('span', {'class':'education-date'}) edu = {"institution": institution if institution else '', "degree": degree[0].text if degree else '', "major": major[0].text if major else '', "dates": edu_dates[0].text if edu_dates else '' } profile["education"].append(edu) return profile def conns(profile): """User's network size""" cs = soup.findAll('dd', {'class': 'overview-connections'}) if cs: profile['connections'] = cs[0].findAll('strong')[0].text return profile def summary(profile): summary_sec = soup.findAll('div', {'id': 'summary-item-view'}) if 
summary_sec: summary_sec = summary_sec[0] summary_content = summary_sec.findAll('p', {"class": "description"}) if summary_content: profile.summary = summary_content[0].text return profile def similar(profile): """Returns a list of similar profile urls, if they exist""" try: ppl = soup.findAll('div', {'id': 'extra'})[0].findAll('a') profile['similar'] = list(set([a['href'] for a in ppl])) except: pass return profile def techtags(profile): """Adds tech tags if they exist""" tags = soup.findAll('ol', {'id': 'skills-list'}) if tags: profile['skills'] = [li.text for li in tags[0].findAll('li')] return profile def interests(profile): """Estimate interests based on groups / affiliations""" groups = soup.findAll('dd', {'id': 'pubgroups'}) if groups: interests = [i.text for i in groups[0].findAll('li')] profile['interests'] = interests return profile profile = skillset(summary(similar(interests(techtags(conns(header(overview( education(employment(meta(profile))))))))))) return profile if not raw else json.dumps(profile) def custom_search(query, types="mynetwork,company,group,sitefeature,skill", user_agent=('User-agent', 'Mozilla 3.10'), proxies=None): """Returns a json dict whose keys are the 'types'. params: :param query: string to search for :param types: 'mynetwork,company,group,sitefeature,skill' """ def restructure(results): """Removes the unecessary 'resultList' key and maps the type directory to a list of results """ for t in results: results[t] = results[t]['resultList'] return results def fill_missing_types(results): """Fill in missing types (keys) which weren't returned by linkedin """ for t in types.split(','): if t not in results: results[t] = [] return results url = "http://www.linkedin.com/ta/federator?query=%s&types=%s" % (query, types) r = requests.get(url, headers=dict([user_agent]), proxies=proxies) results = r.json() return fill_missing_types(restructure(results)) def people_search(first="", last="", limit=None, user_agent=('User-agent', 'Mozilla 3.10'), proxies=None): """http://www.linkedin.com/pub/dir/?search=Search :params first, last, company: usage: >>> from parseli import people_search >>> people_search(first='mek', limit=3) {'people': [{'location': u'&#xc5;rhus Area, Denmark', 'name': {'first': u'Mek', 'last': u'Falk'}, 'title': u'Owner at Alive Music', 'url': u'http://dk.linkedin.com/pub/mek-falk/1b/8a2/4a9'}, {'location': u'Copenhagen Area, Denmark', 'name': {'first': u'Mek', 'last': u'Nielsen'}, 'title': '', 'url': u'http://dk.linkedin.com/in/meknielsen'}, {'location': u'San Francisco Bay Area', 'name': {'first': u'Mek', 'last': u'Karpeles'}, 'title': u'Founder and CEO at Hackerlist, Inc', 'url': u'http://www.linkedin.com/in/mekarpeles'}], 'summary': {'limit': 25, 'total': 169} } """ def parse_serp(html, limit): """params: :param html: html of the people results page :parma limit: only return 'limit' people """ serp = {'people': []} soup = BeautifulSoup(html) serpcnt = soup.findAll('ul', {'class': 'result-summary same-name-dir'}) if serpcnt: pagetotal, of, total = serpcnt[0].text.split(" ")[:3] serp['summary'] = {'limit': int(pagetotal.replace(',', '')), 'total': int(total.replace(',', '')) } vcards = soup.findAll('li', {'class': 'vcard'}) for vcard in vcards[:limit]: person = {} details = vcard.findAll('h2')[0].findAll('a')[0] location = vcard.findAll('span', {'class': 'location'}) title = vcard.findAll('dd', {'class': 'current-content'}) names = (name.text for name in details.findAll('span')) try: person['name'] = dict(zip(('first', 'last'), names)) except: 
person['name'] = {'nick': names[0]} person['url'] = details['href'] person['location'] = location[0].text if location else "" person['title'] = title[0].text if title else "" serp['people'].append(person) return serp url = "http://www.linkedin.com/pub/dir/" \ "?first=%s&last=%s&search=Search&searchType=fps" % (first, last) r = requests.post(url, headers=dict([user_agent]), proxies=proxies) html = r.content return parse_serp(html, limit) def company_search(company, limit=None, user_agent=('User-agent', 'Mozilla 3.10'), proxies=None): """Search for companies usage: >>> from parseli import company_search >>> company_search('google', limit=1) [{u'displayName': u'LinkedIn', u'headLine': u'LinkedIn', u'id': u'1337', u'imageUrl': u'http://media.licdn.com/mpr/mpr/shrink_40_40/p/3/000/248/137/3f632c3.png', u'size': {'lower': 1001, 'upper': 5000}, u'subLine': u'Internet; 1001-5000 employees', u'url': u'http://www.linkedin.com/company/1337'}] """ url = "http://www.linkedin.com/ta/company?query=%s" % company r = requests.get(url, headers=dict([user_agent]), proxies=proxies) companies = r.json()['resultList'][:limit] for company in companies: if 'headLine' in company and '<strong>' in company['headLine']: company['headLine'] = company['headLine'].replace("<strong>", "")\ .replace("</strong>", "") if 'subLine' in company: try: size, _ = company['subLine'].split(" ")[-2:] size = size.replace('+', '').replace(',', '') size1, size2 = (size, size) if "-" not in size else size.split('-') size1, size2 = int(size1), int(size2) except: size1, size2 = (None, None) company[u'size'] = {'lower': size1, 'upper': size2} return companies
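# ---------------------------------------------------------------------------
# Usage sketch (not part of the library): fetching and parsing a public
# profile with getli, by username and by full URL.  The username and the
# proxy address below are placeholders chosen for the example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    # by username (expands to http://linkedin.com/in/<username>)
    profile = getli('mekarpeles')
    print json.dumps(profile, indent=2)

    # by full url, returning the raw json string and going through a proxy
    raw = getli('http://www.linkedin.com/in/mekarpeles', raw=True,
                proxies={'http': 'http://127.0.0.1:8080'})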
# GamePlay 3D Blender Scene Viewer # # Copyright 2013 Ithai Levi # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # parts of the script is copied from http://blenderartists.org/forum/showthread.php?255246-Rotate-whole-scene-around-x-90-and-apply-rotation import bpy,subprocess,shutil,os,sys try: import io_scene_fbx.export_fbx except: print('error: io_scene_fbx.export_fbx not found.') # This might need to be bpy.Quit() raise import math from mathutils import Matrix from functools import cmp_to_key bl_info = {"name": "GamePlay 3D Scene Viewer", "category": "User"} bpy.types.Scene.encoder_xml = bpy.props.BoolProperty(name="Generate XML",default=False) bpy.types.Scene.rotatex = bpy.props.BoolProperty(name="Rotate X-90",default=False) bpy.types.Scene.encoder_group = bpy.props.BoolProperty(name="Group Animations",default=False) bpy.types.Scene.encoder_genmat = bpy.props.BoolProperty(name="Generate Materials",default=True) bpy.types.Scene.encoder_path = bpy.props.StringProperty(name="Encoder Path",subtype="FILE_PATH",default="") bpy.types.Scene.viewer_path = bpy.props.StringProperty(name="Viewer Path",subtype="FILE_PATH",default="") bpy.types.Scene.game_path = bpy.props.StringProperty(name="Workspace",subtype="FILE_PATH",default="") # SORTING HELPERS (sort list of objects, parents prior to children) # root object -> 0, first child -> 1, ... def myDepth(o): if o == None: return 0 if o.parent == None: return 0 else: return 1 + myDepth(o.parent) # compare: parent prior child def myDepthCompare(a,b): da = myDepth(a) db = myDepth(b) if da < db: return -1 elif da > db: return 1 else: return 0 def rotateScene(ang): matPatch = Matrix.Rotation(ang, 4, 'X') # deselect everything to close edit / pose mode etc. bpy.context.scene.objects.active = None # activate all 20 layers for i in range(0, 20): bpy.data.scenes[0].layers[i] = True; # show all root objects for obj in bpy.data.objects: obj.hide = False; # make single user (otherwise import fails on instances!) --> no instance anymore bpy.ops.object.make_single_user(type='ALL', object=True, obdata=True) # prepare rotation-sensitive data # a) deactivate animation constraints # b) apply mirror modifiers for obj in bpy.data.objects: # only posed objects if obj.pose is not None: # check constraints for all bones for pBone in obj.pose.bones: for constraint in pBone.constraints: # for now only deactivate limit_location if constraint.type == 'LIMIT_LOCATION': constraint.mute = True # need to activate current object to apply modifiers bpy.context.scene.objects.active = obj for modifier in obj.modifiers: # if you want to delete only UV_project modifiers if modifier.type == 'MIRROR': bpy.ops.object.modifier_apply(apply_as='DATA', modifier=modifier.name) # deselect again, deterministic behaviour! 
bpy.context.scene.objects.active = None # Iterate the objects in the file, only root level and rotate them for obj in bpy.data.objects: if obj.parent != None: continue obj.matrix_world = matPatch * obj.matrix_world # deselect everything for obj in bpy.data.objects: obj.select = False; # apply all(!) transforms # parent prior child for obj in sorted(bpy.data.objects, key=cmp_to_key(myDepthCompare)): obj.select = True; bpy.ops.object.transform_apply(rotation=True) # deselect again obj.select = False; #------------------------------------------------------------ class SceneView(bpy.types.Operator): """the GamePlay 3D scene viewer""" bl_idname = "scene.gameplayview" bl_label = "Preview" @staticmethod def isMacApp(svp): return sys.platform == 'darwin' and \ os.path.isdir(svp) and \ (svp.endswith('.app') or svp.endswith('.app/')) @classmethod def poll(cls, context): svp = bpy.context.scene.viewer_path enc = bpy.context.scene.encoder_path gdir = bpy.context.scene.game_path try: with open(svp): pass except IOError: return False try: with open(enc): pass except IOError: return False if gdir!='': return os.path.isdir(gdir) else: return True def execute(self, context): # set mode to 'OBJECT' for obj in bpy.context.scene.objects: if obj.type == 'MESH': bpy.context.scene.objects.active = obj bpy.ops.object.mode_set(mode='OBJECT') if bpy.context.scene.rotatex: # Rotate -90 around the X-axis rotateScene(-math.pi / 2.0) axisForward='Y' axisUp='Z' else: axisForward='Z' axisUp='Y' sve = bpy.context.scene.viewer_path svp = bpy.context.scene.game_path resdir = '' macApp = SceneView.isMacApp(sve) if macApp and sve.endswith('.app/'): sve = sve[:-1] if svp=='': if macApp: svp = os.path.normpath(os.path.join(sve, os.pardir)) resdir = os.path.join(sve, 'Contents', 'Resources', 'res') else: svp = os.path.dirname(sve) if resdir=='': resdir = os.path.join(svp, 'res') enc = bpy.context.scene.encoder_path barename = os.path.splitext(bpy.path.basename(bpy.context.blend_data.filepath))[0] sfp = os.path.join(resdir,barename) bpy.ops.export_scene.fbx(filepath=sfp+".fbx", check_existing=True, filter_glob="*.fbx", use_selection=False, global_scale=1.0, axis_forward=axisForward, axis_up=axisUp, object_types={'EMPTY', 'MESH','LAMP', 'CAMERA', 'ARMATURE'}, use_mesh_modifiers=True, mesh_smooth_type='FACE', use_mesh_edges=False, use_armature_deform_only=False, use_anim=True, use_anim_action_all=True, use_default_take=True, use_anim_optimize=False, anim_optimize_precision=6.0, path_mode='AUTO', batch_mode='OFF', use_batch_own_dir=True, use_metadata=True) args = [enc] if bpy.context.scene.encoder_group: args.append("-g:auto") else: args.append("-g:none") args.append("-g:off") if bpy.context.scene.encoder_genmat: args.append("-m") args.append(sfp+".fbx") if bpy.context.scene.encoder_xml: args.insert(1, "-t") subprocess.call(args) for img in bpy.data.images.keys(): if bpy.data.images[img].source=='FILE' and os.path.dirname(bpy.data.images[img].filepath) != resdir and os.path.exists(bpy.data.images[img].filepath): shutil.copy(bpy.data.images[img].filepath,resdir) if not bpy.context.scene.encoder_xml: # Execute scene-viewer if macApp: subprocess.Popen(['/usr/bin/open',sve,'--args',barename],cwd=svp) else: subprocess.Popen([sve,barename],cwd=svp) else: # Execute the platform's text editor pltfm = bpy.app.build_platform.decode('utf-8').lower() runcmd = '' if pltfm.startswith('win'): # runcmd = 'start "" "'+sfp+'.xml"' # too slow on xp32 runcmd = 'explorer "'+sfp+'.xml"' os.system('echo scene-viewer - Executing: '+runcmd) elif 
pltfm.startswith('darwin'): # mac # runcmd = '/usr/bin/open -a TextEdit "'+sfp+'.xml"' # Use an app (TextEdit) runcmd = '/usr/bin/open -t "'+sfp+'.xml"' # Use default text editor os.system('echo scene-viewer - Executing: '+runcmd) elif pltfm.startswith('linux'): # runcmd = '( /usr/bin/gedit "'+sfp+'.xml" ) &' # Use a custom command # # You may substitute the MYPREFERRED string with your graphical text editor (for example: MYPREFERRED="chromium-browser"). # Please don't specify a text editor with no X window interface (like Vim), or a silent process will be open in background. runcmd = '''( MYPREFERRED=""; ALREADY=0 ; runcmd() { if [ $ALREADY = 0 -a ! -z "${2}" -a ! -z "`which ${1}`" ]; then echo "scene-viewer - Executing:" ${1} ${2} ; "${1}" "${2}" ; ALREADY=1 ; fi } ; F="'''+sfp+'''.xml"; runcmd "${MYPREFERRED}" "${F}" ; runcmd "gedit" "${F}" ; runcmd "kate" "${F}" ; runcmd "leafpad" "${F}" ; ) &''' os.system(runcmd) # If you want to disable the text editor, please comment this line with a '#' character. if bpy.context.scene.rotatex: # Rotate 90 around the X-axis rotateScene(math.pi / 2.0) # or use undo, so it rotates scene back (and get instances back) #bpy.ops.ed.undo() return {"FINISHED"} class GameplayPanel(bpy.types.Panel): bl_space_type = "VIEW_3D" bl_region_type = "UI" bl_label = "Gameplay" bl_idname = "SCENE_PT_layout" def draw(self, context): layout = self.layout layout.prop(context.scene, "encoder_path") layout.prop(context.scene, "viewer_path") layout.prop(context.scene, "game_path") layout.prop(context.scene, "encoder_genmat") layout.prop(context.scene, "encoder_group") layout.prop(context.scene, "rotatex") layout.prop(context.scene, "encoder_xml") layout.operator("scene.gameplayview") def register(): bpy.utils.register_class(SceneView) bpy.utils.register_class(GameplayPanel) def unregister(): bpy.utils.unregister_class(SceneView) bpy.utils.unregister_class(GameplayPanel)
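# ---------------------------------------------------------------------------
# Illustrative addition (not in the original add-on): the usual Blender
# convention for running an add-on straight from the Text Editor is to call
# register() when the script is executed as __main__.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    register()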
import six from datetime import datetime from .. import errors from .. import utils from ..constants import DEFAULT_DATA_CHUNK_SIZE from ..types import ( CancellableStream, ContainerConfig, EndpointConfig, HostConfig, NetworkingConfig ) class ContainerApiMixin(object): @utils.check_resource('container') def attach(self, container, stdout=True, stderr=True, stream=False, logs=False, demux=False): """ Attach to a container. The ``.logs()`` function is a wrapper around this method, which you can use instead if you want to fetch/stream container output without first retrieving the entire backlog. Args: container (str): The container to attach to. stdout (bool): Include stdout. stderr (bool): Include stderr. stream (bool): Return container output progressively as an iterator of strings, rather than a single string. logs (bool): Include the container's previous output. demux (bool): Keep stdout and stderr separate. Returns: By default, the container's output as a single string (two if ``demux=True``: one for stdout and one for stderr). If ``stream=True``, an iterator of output strings. If ``demux=True``, two iterators are returned: one for stdout and one for stderr. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = { 'logs': logs and 1 or 0, 'stdout': stdout and 1 or 0, 'stderr': stderr and 1 or 0, 'stream': stream and 1 or 0 } headers = { 'Connection': 'Upgrade', 'Upgrade': 'tcp' } u = self._url("/containers/{0}/attach", container) response = self._post(u, headers=headers, params=params, stream=True) output = self._read_from_socket( response, stream, self._check_is_tty(container), demux=demux) if stream: return CancellableStream(output, response) else: return output @utils.check_resource('container') def attach_socket(self, container, params=None, ws=False): """ Like ``attach``, but returns the underlying socket-like object for the HTTP request. Args: container (str): The container to attach to. params (dict): Dictionary of request parameters (e.g. ``stdout``, ``stderr``, ``stream``). For ``detachKeys``, ~/.docker/config.json is used by default. ws (bool): Use websockets instead of raw HTTP. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ if params is None: params = { 'stdout': 1, 'stderr': 1, 'stream': 1 } if 'detachKeys' not in params \ and 'detachKeys' in self._general_configs: params['detachKeys'] = self._general_configs['detachKeys'] if ws: return self._attach_websocket(container, params) headers = { 'Connection': 'Upgrade', 'Upgrade': 'tcp' } u = self._url("/containers/{0}/attach", container) return self._get_raw_response_socket( self.post( u, None, params=self._attach_params(params), stream=True, headers=headers ) ) @utils.check_resource('container') def commit(self, container, repository=None, tag=None, message=None, author=None, changes=None, conf=None): """ Commit a container to an image. Similar to the ``docker commit`` command. Args: container (str): The image hash of the container repository (str): The repository to push the image to tag (str): The tag to push message (str): A commit message author (str): The name of the author changes (str): Dockerfile instructions to apply while committing conf (dict): The configuration for the container. See the `Engine API documentation <https://docs.docker.com/reference/api/docker_remote_api/>`_ for full details. Raises: :py:class:`docker.errors.APIError` If the server returns an error. 
""" params = { 'container': container, 'repo': repository, 'tag': tag, 'comment': message, 'author': author, 'changes': changes } u = self._url("/commit") return self._result( self._post_json(u, data=conf, params=params), json=True ) def containers(self, quiet=False, all=False, trunc=False, latest=False, since=None, before=None, limit=-1, size=False, filters=None): """ List containers. Similar to the ``docker ps`` command. Args: quiet (bool): Only display numeric Ids all (bool): Show all containers. Only running containers are shown by default trunc (bool): Truncate output latest (bool): Show only the latest created container, include non-running ones. since (str): Show only containers created since Id or Name, include non-running ones before (str): Show only container created before Id or Name, include non-running ones limit (int): Show `limit` last created containers, include non-running ones size (bool): Display sizes filters (dict): Filters to be processed on the image list. Available filters: - `exited` (int): Only containers with specified exit code - `status` (str): One of ``restarting``, ``running``, ``paused``, ``exited`` - `label` (str): format either ``"key"`` or ``"key=value"`` - `id` (str): The id of the container. - `name` (str): The name of the container. - `ancestor` (str): Filter by container ancestor. Format of ``<image-name>[:tag]``, ``<image-id>``, or ``<image@digest>``. - `before` (str): Only containers created before a particular container. Give the container name or id. - `since` (str): Only containers created after a particular container. Give container name or id. A comprehensive list can be found in the documentation for `docker ps <https://docs.docker.com/engine/reference/commandline/ps>`_. Returns: A list of dicts, one per container Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = { 'limit': 1 if latest else limit, 'all': 1 if all else 0, 'size': 1 if size else 0, 'trunc_cmd': 1 if trunc else 0, 'since': since, 'before': before } if filters: params['filters'] = utils.convert_filters(filters) u = self._url("/containers/json") res = self._result(self._get(u, params=params), True) if quiet: return [{'Id': x['Id']} for x in res] if trunc: for x in res: x['Id'] = x['Id'][:12] return res def create_container(self, image, command=None, hostname=None, user=None, detach=False, stdin_open=False, tty=False, ports=None, environment=None, volumes=None, network_disabled=False, name=None, entrypoint=None, working_dir=None, domainname=None, host_config=None, mac_address=None, labels=None, stop_signal=None, networking_config=None, healthcheck=None, stop_timeout=None, runtime=None, use_config_proxy=False): """ Creates a container. Parameters are similar to those for the ``docker run`` command except it doesn't support the attach options (``-a``). The arguments that are passed directly to this function are host-independent configuration options. Host-specific configuration is passed with the `host_config` argument. You'll normally want to use this method in combination with the :py:meth:`create_host_config` method to generate ``host_config``. **Port bindings** Port binding is done in two parts: first, provide a list of ports to open inside the container with the ``ports`` parameter, then declare bindings with the ``host_config`` parameter. For example: .. 
code-block:: python container_id = cli.create_container( 'busybox', 'ls', ports=[1111, 2222], host_config=cli.create_host_config(port_bindings={ 1111: 4567, 2222: None }) ) You can limit the host address on which the port will be exposed like such: .. code-block:: python cli.create_host_config(port_bindings={1111: ('127.0.0.1', 4567)}) Or without host port assignment: .. code-block:: python cli.create_host_config(port_bindings={1111: ('127.0.0.1',)}) If you wish to use UDP instead of TCP (default), you need to declare ports as such in both the config and host config: .. code-block:: python container_id = cli.create_container( 'busybox', 'ls', ports=[(1111, 'udp'), 2222], host_config=cli.create_host_config(port_bindings={ '1111/udp': 4567, 2222: None }) ) To bind multiple host ports to a single container port, use the following syntax: .. code-block:: python cli.create_host_config(port_bindings={ 1111: [1234, 4567] }) You can also bind multiple IPs to a single container port: .. code-block:: python cli.create_host_config(port_bindings={ 1111: [ ('192.168.0.100', 1234), ('192.168.0.101', 1234) ] }) **Using volumes** Volume declaration is done in two parts. Provide a list of paths to use as mountpoints inside the container with the ``volumes`` parameter, and declare mappings from paths on the host in the ``host_config`` section. .. code-block:: python container_id = cli.create_container( 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], host_config=cli.create_host_config(binds={ '/home/user1/': { 'bind': '/mnt/vol2', 'mode': 'rw', }, '/var/www': { 'bind': '/mnt/vol1', 'mode': 'ro', } }) ) You can alternatively specify binds as a list. This code is equivalent to the example above: .. code-block:: python container_id = cli.create_container( 'busybox', 'ls', volumes=['/mnt/vol1', '/mnt/vol2'], host_config=cli.create_host_config(binds=[ '/home/user1/:/mnt/vol2', '/var/www:/mnt/vol1:ro', ]) ) **Networking** You can specify networks to connect the container to by using the ``networking_config`` parameter. At the time of creation, you can only connect a container to a single networking, but you can create more connections by using :py:meth:`~connect_container_to_network`. For example: .. code-block:: python networking_config = docker_client.create_networking_config({ 'network1': docker_client.create_endpoint_config( ipv4_address='172.28.0.124', aliases=['foo', 'bar'], links=['container2'] ) }) ctnr = docker_client.create_container( img, command, networking_config=networking_config ) Args: image (str): The image to run command (str or list): The command to be run in the container hostname (str): Optional hostname for the container user (str or int): Username or UID detach (bool): Detached mode: run container in the background and return container ID stdin_open (bool): Keep STDIN open even if not attached tty (bool): Allocate a pseudo-TTY ports (list of ints): A list of port numbers environment (dict or list): A dictionary or a list of strings in the following format ``["PASSWORD=xxx"]`` or ``{"PASSWORD": "xxx"}``. volumes (str or list): List of paths inside the container to use as volumes. network_disabled (bool): Disable networking name (str): A name for the container entrypoint (str or list): An entrypoint working_dir (str): Path to the working directory domainname (str): The domain name to use for the container host_config (dict): A dictionary created with :py:meth:`create_host_config`. 
mac_address (str): The Mac Address to assign the container labels (dict or list): A dictionary of name-value labels (e.g. ``{"label1": "value1", "label2": "value2"}``) or a list of names of labels to set with empty values (e.g. ``["label1", "label2"]``) stop_signal (str): The stop signal to use to stop the container (e.g. ``SIGINT``). stop_timeout (int): Timeout to stop the container, in seconds. Default: 10 networking_config (dict): A networking configuration generated by :py:meth:`create_networking_config`. runtime (str): Runtime to use with this container. healthcheck (dict): Specify a test to perform to check that the container is healthy. use_config_proxy (bool): If ``True``, and if the docker client configuration file (``~/.docker/config.json`` by default) contains a proxy configuration, the corresponding environment variables will be set in the container being created. Returns: A dictionary with an image 'Id' key and a 'Warnings' key. Raises: :py:class:`docker.errors.ImageNotFound` If the specified image does not exist. :py:class:`docker.errors.APIError` If the server returns an error. """ if isinstance(volumes, six.string_types): volumes = [volumes, ] if isinstance(environment, dict): environment = utils.utils.format_environment(environment) if use_config_proxy: environment = self._proxy_configs.inject_proxy_environment( environment ) config = self.create_container_config( image, command, hostname, user, detach, stdin_open, tty, ports, environment, volumes, network_disabled, entrypoint, working_dir, domainname, host_config, mac_address, labels, stop_signal, networking_config, healthcheck, stop_timeout, runtime ) return self.create_container_from_config(config, name) def create_container_config(self, *args, **kwargs): return ContainerConfig(self._version, *args, **kwargs) def create_container_from_config(self, config, name=None): u = self._url("/containers/create") params = { 'name': name } res = self._post_json(u, data=config, params=params) return self._result(res, True) def create_host_config(self, *args, **kwargs): """ Create a dictionary for the ``host_config`` argument to :py:meth:`create_container`. Args: auto_remove (bool): enable auto-removal of the container on daemon side when the container's process exits. binds (dict): Volumes to bind. See :py:meth:`create_container` for more information. blkio_weight_device: Block IO weight (relative device weight) in the form of: ``[{"Path": "device_path", "Weight": weight}]``. blkio_weight: Block IO weight (relative weight), accepts a weight value between 10 and 1000. cap_add (list of str): Add kernel capabilities. For example, ``["SYS_ADMIN", "MKNOD"]``. cap_drop (list of str): Drop kernel capabilities. cpu_period (int): The length of a CPU period in microseconds. cpu_quota (int): Microseconds of CPU time that the container can get in a CPU period. cpu_shares (int): CPU shares (relative weight). cpuset_cpus (str): CPUs in which to allow execution (``0-3``, ``0,1``). cpuset_mems (str): Memory nodes (MEMs) in which to allow execution (``0-3``, ``0,1``). Only effective on NUMA systems. device_cgroup_rules (:py:class:`list`): A list of cgroup rules to apply to the container. device_read_bps: Limit read rate (bytes per second) from a device in the form of: `[{"Path": "device_path", "Rate": rate}]` device_read_iops: Limit read rate (IO per second) from a device. device_write_bps: Limit write rate (bytes per second) from a device. device_write_iops: Limit write rate (IO per second) from a device. 
devices (:py:class:`list`): Expose host devices to the container, as a list of strings in the form ``<path_on_host>:<path_in_container>:<cgroup_permissions>``. For example, ``/dev/sda:/dev/xvda:rwm`` allows the container to have read-write access to the host's ``/dev/sda`` via a node named ``/dev/xvda`` inside the container. dns (:py:class:`list`): Set custom DNS servers. dns_opt (:py:class:`list`): Additional options to be added to the container's ``resolv.conf`` file dns_search (:py:class:`list`): DNS search domains. extra_hosts (dict): Additional hostnames to resolve inside the container, as a mapping of hostname to IP address. group_add (:py:class:`list`): List of additional group names and/or IDs that the container process will run as. init (bool): Run an init inside the container that forwards signals and reaps processes init_path (str): Path to the docker-init binary ipc_mode (str): Set the IPC mode for the container. isolation (str): Isolation technology to use. Default: ``None``. links (dict): Mapping of links using the ``{'container': 'alias'}`` format. The alias is optional. Containers declared in this dict will be linked to the new container using the provided alias. Default: ``None``. log_config (LogConfig): Logging configuration lxc_conf (dict): LXC config. mem_limit (float or str): Memory limit. Accepts float values (which represent the memory limit of the created container in bytes) or a string with a units identification char (``100000b``, ``1000k``, ``128m``, ``1g``). If a string is specified without a units character, bytes are assumed as an mem_swappiness (int): Tune a container's memory swappiness behavior. Accepts number between 0 and 100. memswap_limit (str or int): Maximum amount of memory + swap a container is allowed to consume. mounts (:py:class:`list`): Specification for mounts to be added to the container. More powerful alternative to ``binds``. Each item in the list is expected to be a :py:class:`docker.types.Mount` object. network_mode (str): One of: - ``bridge`` Create a new network stack for the container on on the bridge network. - ``none`` No networking for this container. - ``container:<name|id>`` Reuse another container's network stack. - ``host`` Use the host network stack. oom_kill_disable (bool): Whether to disable OOM killer. oom_score_adj (int): An integer value containing the score given to the container in order to tune OOM killer preferences. pid_mode (str): If set to ``host``, use the host PID namespace inside the container. pids_limit (int): Tune a container's pids limit. Set ``-1`` for unlimited. port_bindings (dict): See :py:meth:`create_container` for more information. privileged (bool): Give extended privileges to this container. publish_all_ports (bool): Publish all ports to the host. read_only (bool): Mount the container's root filesystem as read only. restart_policy (dict): Restart the container when it exits. Configured as a dictionary with keys: - ``Name`` One of ``on-failure``, or ``always``. - ``MaximumRetryCount`` Number of times to restart the container on failure. security_opt (:py:class:`list`): A list of string values to customize labels for MLS systems, such as SELinux. shm_size (str or int): Size of /dev/shm (e.g. ``1G``). storage_opt (dict): Storage driver options per container as a key-value mapping. sysctls (dict): Kernel parameters to set in the container. tmpfs (dict): Temporary filesystems to mount, as a dictionary mapping a path inside the container to options for that path. For example: .. 
code-block:: python { '/mnt/vol2': '', '/mnt/vol1': 'size=3G,uid=1000' } ulimits (:py:class:`list`): Ulimits to set inside the container, as a list of :py:class:`docker.types.Ulimit` instances. userns_mode (str): Sets the user namespace mode for the container when user namespace remapping option is enabled. Supported values are: ``host`` uts_mode (str): Sets the UTS namespace mode for the container. Supported values are: ``host`` volumes_from (:py:class:`list`): List of container names or IDs to get volumes from. runtime (str): Runtime to use with this container. Returns: (dict) A dictionary which can be passed to the ``host_config`` argument to :py:meth:`create_container`. Example: >>> cli.create_host_config(privileged=True, cap_drop=['MKNOD'], volumes_from=['nostalgic_newton']) {'CapDrop': ['MKNOD'], 'LxcConf': None, 'Privileged': True, 'VolumesFrom': ['nostalgic_newton'], 'PublishAllPorts': False} """ if not kwargs: kwargs = {} if 'version' in kwargs: raise TypeError( "create_host_config() got an unexpected " "keyword argument 'version'" ) kwargs['version'] = self._version return HostConfig(*args, **kwargs) def create_networking_config(self, *args, **kwargs): """ Create a networking config dictionary to be used as the ``networking_config`` parameter in :py:meth:`create_container`. Args: endpoints_config (dict): A dictionary mapping network names to endpoint configurations generated by :py:meth:`create_endpoint_config`. Returns: (dict) A networking config. Example: >>> docker_client.create_network('network1') >>> networking_config = docker_client.create_networking_config({ 'network1': docker_client.create_endpoint_config() }) >>> container = docker_client.create_container( img, command, networking_config=networking_config ) """ return NetworkingConfig(*args, **kwargs) def create_endpoint_config(self, *args, **kwargs): """ Create an endpoint config dictionary to be used with :py:meth:`create_networking_config`. Args: aliases (:py:class:`list`): A list of aliases for this endpoint. Names in that list can be used within the network to reach the container. Defaults to ``None``. links (dict): Mapping of links for this endpoint using the ``{'container': 'alias'}`` format. The alias is optional. Containers declared in this dict will be linked to this container using the provided alias. Defaults to ``None``. ipv4_address (str): The IP address of this container on the network, using the IPv4 protocol. Defaults to ``None``. ipv6_address (str): The IP address of this container on the network, using the IPv6 protocol. Defaults to ``None``. link_local_ips (:py:class:`list`): A list of link-local (IPv4/IPv6) addresses. Returns: (dict) An endpoint config. Example: >>> endpoint_config = client.create_endpoint_config( aliases=['web', 'app'], links={'app_db': 'db', 'another': None}, ipv4_address='132.65.0.123' ) """ return EndpointConfig(self._version, *args, **kwargs) @utils.check_resource('container') def diff(self, container): """ Inspect changes on a container's filesystem. Args: container (str): The container to diff Returns: (str) Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self._result( self._get(self._url("/containers/{0}/changes", container)), True ) @utils.check_resource('container') def export(self, container, chunk_size=DEFAULT_DATA_CHUNK_SIZE): """ Export the contents of a filesystem as a tar archive. Args: container (str): The container to export chunk_size (int): The number of bytes returned by each iteration of the generator. 
If ``None``, data will be streamed as it is received. Default: 2 MB Returns: (generator): The archived filesystem data stream Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ res = self._get( self._url("/containers/{0}/export", container), stream=True ) return self._stream_raw_result(res, chunk_size, False) @utils.check_resource('container') def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE): """ Retrieve a file or folder from a container in the form of a tar archive. Args: container (str): The container where the file is located path (str): Path to the file or folder to retrieve chunk_size (int): The number of bytes returned by each iteration of the generator. If ``None``, data will be streamed as it is received. Default: 2 MB Returns: (tuple): First element is a raw tar data stream. Second element is a dict containing ``stat`` information on the specified ``path``. Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: >>> c = docker.APIClient() >>> f = open('./sh_bin.tar', 'wb') >>> bits, stat = c.get_archive(container, '/bin/sh') >>> print(stat) {'name': 'sh', 'size': 1075464, 'mode': 493, 'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''} >>> for chunk in bits: ... f.write(chunk) >>> f.close() """ params = { 'path': path } url = self._url('/containers/{0}/archive', container) res = self._get(url, params=params, stream=True) self._raise_for_status(res) encoded_stat = res.headers.get('x-docker-container-path-stat') return ( self._stream_raw_result(res, chunk_size, False), utils.decode_json_header(encoded_stat) if encoded_stat else None ) @utils.check_resource('container') def inspect_container(self, container): """ Identical to the `docker inspect` command, but only for containers. Args: container (str): The container to inspect Returns: (dict): Similar to the output of `docker inspect`, but as a single dict Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ return self._result( self._get(self._url("/containers/{0}/json", container)), True ) @utils.check_resource('container') def kill(self, container, signal=None): """ Kill a container or send a signal to a container. Args: container (str): The container to kill signal (str or int): The signal to send. Defaults to ``SIGKILL`` Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url("/containers/{0}/kill", container) params = {} if signal is not None: if not isinstance(signal, six.string_types): signal = int(signal) params['signal'] = signal res = self._post(url, params=params) self._raise_for_status(res) @utils.check_resource('container') def logs(self, container, stdout=True, stderr=True, stream=False, timestamps=False, tail='all', since=None, follow=None, until=None): """ Get logs from a container. Similar to the ``docker logs`` command. The ``stream`` parameter makes the ``logs`` function return a blocking generator you can iterate over to retrieve log output as it happens. Args: container (str): The container to get logs from stdout (bool): Get ``STDOUT``. Default ``True`` stderr (bool): Get ``STDERR``. Default ``True`` stream (bool): Stream the response. Default ``False`` timestamps (bool): Show timestamps. Default ``False`` tail (str or int): Output specified number of lines at the end of logs. Either an integer of number of lines or the string ``all``. 
Default ``all`` since (datetime or int): Show logs since a given datetime or integer epoch (in seconds) follow (bool): Follow log output. Default ``False`` until (datetime or int): Show logs that occurred before the given datetime or integer epoch (in seconds) Returns: (generator or str) Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ if follow is None: follow = stream params = {'stderr': stderr and 1 or 0, 'stdout': stdout and 1 or 0, 'timestamps': timestamps and 1 or 0, 'follow': follow and 1 or 0, } if tail != 'all' and (not isinstance(tail, int) or tail < 0): tail = 'all' params['tail'] = tail if since is not None: if isinstance(since, datetime): params['since'] = utils.datetime_to_timestamp(since) elif (isinstance(since, int) and since > 0): params['since'] = since else: raise errors.InvalidArgument( 'since value should be datetime or positive int, ' 'not {}'.format(type(since)) ) if until is not None: if utils.version_lt(self._version, '1.35'): raise errors.InvalidVersion( 'until is not supported for API version < 1.35' ) if isinstance(until, datetime): params['until'] = utils.datetime_to_timestamp(until) elif (isinstance(until, int) and until > 0): params['until'] = until else: raise errors.InvalidArgument( 'until value should be datetime or positive int, ' 'not {}'.format(type(until)) ) url = self._url("/containers/{0}/logs", container) res = self._get(url, params=params, stream=stream) output = self._get_result(container, stream, res) if stream: return CancellableStream(output, res) else: return output @utils.check_resource('container') def pause(self, container): """ Pauses all processes within a container. Args: container (str): The container to pause Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url('/containers/{0}/pause', container) res = self._post(url) self._raise_for_status(res) @utils.check_resource('container') def port(self, container, private_port): """ Lookup the public-facing port that is NAT-ed to ``private_port``. Identical to the ``docker port`` command. Args: container (str): The container to look up private_port (int): The private port to inspect Returns: (list of dict): The mapping for the host ports Raises: :py:class:`docker.errors.APIError` If the server returns an error. Example: .. code-block:: bash $ docker run -d -p 80:80 ubuntu:14.04 /bin/sleep 30 7174d6347063a83f412fad6124c99cffd25ffe1a0807eb4b7f9cec76ac8cb43b .. code-block:: python >>> cli.port('7174d6347063', 80) [{'HostIp': '0.0.0.0', 'HostPort': '80'}] """ res = self._get(self._url("/containers/{0}/json", container)) self._raise_for_status(res) json_ = res.json() private_port = str(private_port) h_ports = None # Port settings is None when the container is running with # network_mode=host. port_settings = json_.get('NetworkSettings', {}).get('Ports') if port_settings is None: return None if '/' in private_port: return port_settings.get(private_port) h_ports = port_settings.get(private_port + '/tcp') if h_ports is None: h_ports = port_settings.get(private_port + '/udp') return h_ports @utils.check_resource('container') def put_archive(self, container, path, data): """ Insert a file or folder in an existing container using a tar archive as source. Args: container (str): The container where the file(s) will be extracted path (str): Path inside the container where the file(s) will be extracted. Must exist. data (bytes): tar data to be extracted Returns: (bool): True if the call succeeds. 
Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = {'path': path} url = self._url('/containers/{0}/archive', container) res = self._put(url, params=params, data=data) self._raise_for_status(res) return res.status_code == 200 @utils.minimum_version('1.25') def prune_containers(self, filters=None): """ Delete stopped containers Args: filters (dict): Filters to process on the prune list. Returns: (dict): A dict containing a list of deleted container IDs and the amount of disk space reclaimed in bytes. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = {} if filters: params['filters'] = utils.convert_filters(filters) url = self._url('/containers/prune') return self._result(self._post(url, params=params), True) @utils.check_resource('container') def remove_container(self, container, v=False, link=False, force=False): """ Remove a container. Similar to the ``docker rm`` command. Args: container (str): The container to remove v (bool): Remove the volumes associated with the container link (bool): Remove the specified link and not the underlying container force (bool): Force the removal of a running container (uses ``SIGKILL``) Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = {'v': v, 'link': link, 'force': force} res = self._delete( self._url("/containers/{0}", container), params=params ) self._raise_for_status(res) @utils.check_resource('container') def rename(self, container, name): """ Rename a container. Similar to the ``docker rename`` command. Args: container (str): ID of the container to rename name (str): New name for the container Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url("/containers/{0}/rename", container) params = {'name': name} res = self._post(url, params=params) self._raise_for_status(res) @utils.check_resource('container') def resize(self, container, height, width): """ Resize the tty session. Args: container (str or dict): The container to resize height (int): Height of tty session width (int): Width of tty session Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = {'h': height, 'w': width} url = self._url("/containers/{0}/resize", container) res = self._post(url, params=params) self._raise_for_status(res) @utils.check_resource('container') def restart(self, container, timeout=10): """ Restart a container. Similar to the ``docker restart`` command. Args: container (str or dict): The container to restart. If a dict, the ``Id`` key is used. timeout (int): Number of seconds to try to stop for before killing the container. Once killed it will then be restarted. Default is 10 seconds. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ params = {'t': timeout} url = self._url("/containers/{0}/restart", container) conn_timeout = self.timeout if conn_timeout is not None: conn_timeout += timeout res = self._post(url, params=params, timeout=conn_timeout) self._raise_for_status(res) @utils.check_resource('container') def start(self, container, *args, **kwargs): """ Start a container. Similar to the ``docker start`` command, but doesn't support attach options. **Deprecation warning:** Passing configuration options in ``start`` is no longer supported. Users are expected to provide host config options in the ``host_config`` parameter of :py:meth:`~ContainerApiMixin.create_container`. 
Args: container (str): The container to start Raises: :py:class:`docker.errors.APIError` If the server returns an error. :py:class:`docker.errors.DeprecatedMethod` If any argument besides ``container`` are provided. Example: >>> container = cli.create_container( ... image='busybox:latest', ... command='/bin/sleep 30') >>> cli.start(container=container.get('Id')) """ if args or kwargs: raise errors.DeprecatedMethod( 'Providing configuration in the start() method is no longer ' 'supported. Use the host_config param in create_container ' 'instead.' ) url = self._url("/containers/{0}/start", container) res = self._post(url) self._raise_for_status(res) @utils.check_resource('container') def stats(self, container, decode=None, stream=True): """ Stream statistics for a specific container. Similar to the ``docker stats`` command. Args: container (str): The container to stream statistics from decode (bool): If set to true, stream will be decoded into dicts on the fly. Only applicable if ``stream`` is True. False by default. stream (bool): If set to false, only the current stats will be returned instead of a stream. True by default. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url("/containers/{0}/stats", container) if stream: return self._stream_helper(self._get(url, stream=True), decode=decode) else: if decode: raise errors.InvalidArgument( "decode is only available in conjuction with stream=True" ) return self._result(self._get(url, params={'stream': False}), json=True) @utils.check_resource('container') def stop(self, container, timeout=None): """ Stops a container. Similar to the ``docker stop`` command. Args: container (str): The container to stop timeout (int): Timeout in seconds to wait for the container to stop before sending a ``SIGKILL``. If None, then the StopTimeout value of the container will be used. Default: None Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ if timeout is None: params = {} timeout = 10 else: params = {'t': timeout} url = self._url("/containers/{0}/stop", container) conn_timeout = self.timeout if conn_timeout is not None: conn_timeout += timeout res = self._post(url, params=params, timeout=conn_timeout) self._raise_for_status(res) @utils.check_resource('container') def top(self, container, ps_args=None): """ Display the running processes of a container. Args: container (str): The container to inspect ps_args (str): An optional arguments passed to ps (e.g. ``aux``) Returns: (str): The output of the top Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ u = self._url("/containers/{0}/top", container) params = {} if ps_args is not None: params['ps_args'] = ps_args return self._result(self._get(u, params=params), True) @utils.check_resource('container') def unpause(self, container): """ Unpause all processes within a container. Args: container (str): The container to unpause """ url = self._url('/containers/{0}/unpause', container) res = self._post(url) self._raise_for_status(res) @utils.minimum_version('1.22') @utils.check_resource('container') def update_container( self, container, blkio_weight=None, cpu_period=None, cpu_quota=None, cpu_shares=None, cpuset_cpus=None, cpuset_mems=None, mem_limit=None, mem_reservation=None, memswap_limit=None, kernel_memory=None, restart_policy=None ): """ Update resource configs of one or more containers. 
Args: container (str): The container to inspect blkio_weight (int): Block IO (relative weight), between 10 and 1000 cpu_period (int): Limit CPU CFS (Completely Fair Scheduler) period cpu_quota (int): Limit CPU CFS (Completely Fair Scheduler) quota cpu_shares (int): CPU shares (relative weight) cpuset_cpus (str): CPUs in which to allow execution cpuset_mems (str): MEMs in which to allow execution mem_limit (int or str): Memory limit mem_reservation (int or str): Memory soft limit memswap_limit (int or str): Total memory (memory + swap), -1 to disable swap kernel_memory (int or str): Kernel memory limit restart_policy (dict): Restart policy dictionary Returns: (dict): Dictionary containing a ``Warnings`` key. Raises: :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url('/containers/{0}/update', container) data = {} if blkio_weight: data['BlkioWeight'] = blkio_weight if cpu_period: data['CpuPeriod'] = cpu_period if cpu_shares: data['CpuShares'] = cpu_shares if cpu_quota: data['CpuQuota'] = cpu_quota if cpuset_cpus: data['CpusetCpus'] = cpuset_cpus if cpuset_mems: data['CpusetMems'] = cpuset_mems if mem_limit: data['Memory'] = utils.parse_bytes(mem_limit) if mem_reservation: data['MemoryReservation'] = utils.parse_bytes(mem_reservation) if memswap_limit: data['MemorySwap'] = utils.parse_bytes(memswap_limit) if kernel_memory: data['KernelMemory'] = utils.parse_bytes(kernel_memory) if restart_policy: if utils.version_lt(self._version, '1.23'): raise errors.InvalidVersion( 'restart policy update is not supported ' 'for API version < 1.23' ) data['RestartPolicy'] = restart_policy res = self._post_json(url, data=data) return self._result(res, True) @utils.check_resource('container') def wait(self, container, timeout=None, condition=None): """ Block until a container stops, then return its exit code. Similar to the ``docker wait`` command. Args: container (str or dict): The container to wait on. If a dict, the ``Id`` key is used. timeout (int): Request timeout condition (str): Wait until a container state reaches the given condition, either ``not-running`` (default), ``next-exit``, or ``removed`` Returns: (dict): The API's response as a Python dictionary, including the container's exit code under the ``StatusCode`` attribute. Raises: :py:class:`requests.exceptions.ReadTimeout` If the timeout is exceeded. :py:class:`docker.errors.APIError` If the server returns an error. """ url = self._url("/containers/{0}/wait", container) params = {} if condition is not None: if utils.version_lt(self._version, '1.30'): raise errors.InvalidVersion( 'wait condition is not supported for API version < 1.30' ) params['condition'] = condition res = self._post(url, timeout=timeout, params=params) return self._result(res, True)
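# A minimal usage sketch (not part of the original module), assuming the
# standard docker-py client: it drives the lifecycle methods defined above
# (start, wait, remove_container) through docker.APIClient, which mixes in
# ContainerApiMixin. The socket URL, image name, and command are illustrative.
def _example_container_lifecycle():
    import docker

    client = docker.APIClient(base_url='unix://var/run/docker.sock')
    container = client.create_container(image='busybox:latest',
                                        command='/bin/sleep 30')
    client.start(container=container.get('Id'))
    # Block until the container exits; the exit code is reported under the
    # ``StatusCode`` key (see wait() above).
    result = client.wait(container, timeout=60)
    client.remove_container(container, v=True, force=True)
    return result.get('StatusCode')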
# Copyright 2014 Google Inc. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Batch updates / deletes of storage buckets / blobs. See: https://cloud.google.com/storage/docs/json_api/v1/how-tos/batch """ from email.encoders import encode_noop from email.generator import Generator from email.mime.application import MIMEApplication from email.mime.multipart import MIMEMultipart from email.parser import Parser import httplib2 import io import json import six from gcloud.exceptions import make_exception from gcloud.storage.connection import Connection class MIMEApplicationHTTP(MIMEApplication): """MIME type for ``application/http``. Constructs payload from headers and body :type method: string :param method: HTTP method :type uri: string :param uri: URI for HTTP request :type headers: dict :param headers: HTTP headers :type body: text or None :param body: HTTP payload """ def __init__(self, method, uri, headers, body): if isinstance(body, dict): body = json.dumps(body) headers['Content-Type'] = 'application/json' headers['Content-Length'] = len(body) if body is None: body = '' lines = ['%s %s HTTP/1.1' % (method, uri)] lines.extend(['%s: %s' % (key, value) for key, value in sorted(headers.items())]) lines.append('') lines.append(body) payload = '\r\n'.join(lines) if six.PY2: # pragma: NO COVER Python2 # Sigh. email.message.Message is an old-style class, so we # cannot use 'super()'. MIMEApplication.__init__(self, payload, 'http', encode_noop) else: # pragma: NO COVER Python3 super_init = super(MIMEApplicationHTTP, self).__init__ super_init(payload, 'http', encode_noop) class NoContent(object): """Emulate an HTTP '204 No Content' response.""" status = 204 class _FutureDict(object): """Class to hold a future value for a deferred request. Used by for requests that get sent in a :class:`Batch`. """ @staticmethod def get(key, default=None): """Stand-in for dict.get. :type key: object :param key: Hashable dictionary key. :type default: object :param default: Fallback value to dict.get. :raises: :class:`KeyError` always since the future is intended to fail as a dictionary. """ raise KeyError('Cannot get(%r, default=%r) on a future' % ( key, default)) def __getitem__(self, key): """Stand-in for dict[key]. :type key: object :param key: Hashable dictionary key. :raises: :class:`KeyError` always since the future is intended to fail as a dictionary. """ raise KeyError('Cannot get item %r from a future' % (key,)) def __setitem__(self, key, value): """Stand-in for dict[key] = value. :type key: object :param key: Hashable dictionary key. :type value: object :param value: Dictionary value. :raises: :class:`KeyError` always since the future is intended to fail as a dictionary. """ raise KeyError('Cannot set %r -> %r on a future' % (key, value)) class Batch(Connection): """Proxy an underlying connection, batching up change operations. :type client: :class:`gcloud.storage.client.Client` :param client: The client to use for making connections. 
""" _MAX_BATCH_SIZE = 1000 def __init__(self, client): super(Batch, self).__init__() self._client = client self._requests = [] self._target_objects = [] def _do_request(self, method, url, headers, data, target_object): """Override Connection: defer actual HTTP request. Only allow up to ``_MAX_BATCH_SIZE`` requests to be deferred. :type method: string :param method: The HTTP method to use in the request. :type url: string :param url: The URL to send the request to. :type headers: dict :param headers: A dictionary of HTTP headers to send with the request. :type data: string :param data: The data to send as the body of the request. :type target_object: object or :class:`NoneType` :param target_object: This allows us to enable custom behavior in our batch connection. Here we defer an HTTP request and complete initialization of the object at a later time. :rtype: tuple of ``response`` (a dictionary of sorts) and ``content`` (a string). :returns: The HTTP response object and the content of the response. """ if len(self._requests) >= self._MAX_BATCH_SIZE: raise ValueError("Too many deferred requests (max %d)" % self._MAX_BATCH_SIZE) self._requests.append((method, url, headers, data)) result = _FutureDict() self._target_objects.append(target_object) if target_object is not None: target_object._properties = result return NoContent(), result def _prepare_batch_request(self): """Prepares headers and body for a batch request. :rtype: tuple (dict, string) :returns: The pair of headers and body of the batch request to be sent. :raises: :class:`ValueError` if no requests have been deferred. """ if len(self._requests) == 0: raise ValueError("No deferred requests") multi = MIMEMultipart() for method, uri, headers, body in self._requests: subrequest = MIMEApplicationHTTP(method, uri, headers, body) multi.attach(subrequest) # The `email` package expects to deal with "native" strings if six.PY3: # pragma: NO COVER Python3 buf = io.StringIO() else: # pragma: NO COVER Python2 buf = io.BytesIO() generator = Generator(buf, False, 0) generator.flatten(multi) payload = buf.getvalue() # Strip off redundant header text _, body = payload.split('\n\n', 1) return dict(multi._headers), body def _finish_futures(self, responses): """Apply all the batch responses to the futures created. :type responses: list of (headers, payload) tuples. :param responses: List of headers and payloads from each response in the batch. :raises: :class:`ValueError` if no requests have been deferred. """ # If a bad status occurs, we track it, but don't raise an exception # until all futures have been populated. exception_args = None if len(self._target_objects) != len(responses): raise ValueError('Expected a response for every request.') for target_object, sub_response in zip(self._target_objects, responses): resp_headers, sub_payload = sub_response if not 200 <= resp_headers.status < 300: exception_args = exception_args or (resp_headers, sub_payload) elif target_object is not None: target_object._properties = sub_payload if exception_args is not None: raise make_exception(*exception_args) def finish(self): """Submit a single `multipart/mixed` request w/ deferred requests. :rtype: list of tuples :returns: one ``(headers, payload)`` tuple per deferred request. """ headers, body = self._prepare_batch_request() url = '%s/batch' % self.API_BASE_URL # Use the private ``_connection`` rather than the public # ``.connection``, since the public connection may be this # current batch. 
response, content = self._client._connection._make_request( 'POST', url, data=body, headers=headers) responses = list(_unpack_batch_response(response, content)) self._finish_futures(responses) return responses def current(self): """Return the topmost batch, or None.""" return self._client.current_batch def __enter__(self): self._client._push_batch(self) return self def __exit__(self, exc_type, exc_val, exc_tb): try: if exc_type is None: self.finish() finally: self._client._pop_batch() def _generate_faux_mime_message(parser, response, content): """Convert response, content -> (multipart) email.message. Helper for _unpack_batch_response. """ # We coerce to bytes to get consitent concat across # Py2 and Py3. Percent formatting is insufficient since # it includes the b in Py3. if not isinstance(content, six.binary_type): content = content.encode('utf-8') content_type = response['content-type'] if not isinstance(content_type, six.binary_type): content_type = content_type.encode('utf-8') faux_message = b''.join([ b'Content-Type: ', content_type, b'\nMIME-Version: 1.0\n\n', content, ]) if six.PY2: return parser.parsestr(faux_message) else: # pragma: NO COVER Python3 return parser.parsestr(faux_message.decode('utf-8')) def _unpack_batch_response(response, content): """Convert response, content -> [(headers, payload)]. Creates a generator of tuples of emulating the responses to :meth:`httplib2.Http.request` (a pair of headers and payload). :type response: :class:`httplib2.Response` :param response: HTTP response / headers from a request. :type content: string :param content: Response payload with a batch response. :rtype: generator :returns: A generator of header, payload pairs. """ parser = Parser() message = _generate_faux_mime_message(parser, response, content) if not isinstance(message._payload, list): raise ValueError('Bad response: not multi-part') for subrequest in message._payload: status_line, rest = subrequest._payload.split('\n', 1) _, status, _ = status_line.split(' ', 2) sub_message = parser.parsestr(rest) payload = sub_message._payload ctype = sub_message['Content-Type'] msg_headers = dict(sub_message._headers) msg_headers['status'] = status headers = httplib2.Response(msg_headers) if ctype and ctype.startswith('application/json'): payload = json.loads(payload) yield headers, payload
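# A minimal usage sketch (not part of the original module): deferring several
# blob deletions through the Batch context manager defined above so they are
# sent as one multipart request when the block exits. The bucket and blob
# names, and the Client.bucket() / Bucket.delete_blob() calls, are
# illustrative assumptions about the surrounding gcloud.storage API.
def _example_batch_delete(client, bucket_name, blob_names):
    bucket = client.bucket(bucket_name)
    with Batch(client):
        # While the batch is pushed onto the client, requests made through it
        # are deferred by _do_request() and only issued by finish() on exit.
        for name in blob_names:
            bucket.delete_blob(name)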
from math import cos, sin import numpy as np from OpenGL.GL import * from PyEngine3D.Utilities import * from PyEngine3D.App import CoreManager from PyEngine3D.OpenGLContext import CreateTexture, Texture2D, Texture3D, FrameBuffer from PyEngine3D.Render import ScreenQuad from .Constants import * def CieColorMatchingFunctionTableValue(wavelength, column): if wavelength <= kLambdaMin or wavelength >= kLambdaMax: return 0.0 u = (wavelength - kLambdaMin) / 5.0 row = int(u) assert(row >= 0 and row + 1 < 95) assert(CIE_2_DEG_COLOR_MATCHING_FUNCTIONS[4 * row] <= wavelength <= CIE_2_DEG_COLOR_MATCHING_FUNCTIONS[4 * (row + 1)]) u -= row return CIE_2_DEG_COLOR_MATCHING_FUNCTIONS[4 * row + column] * (1.0 - u) + \ CIE_2_DEG_COLOR_MATCHING_FUNCTIONS[4 * (row + 1) + column] * u def Interpolate(wavelengths, wavelength_function, wavelength): assert(len(wavelength_function) == len(wavelengths)) if wavelength < wavelengths[0]: return wavelength_function[0] for i in range(len(wavelengths) - 1): if wavelength < wavelengths[i + 1]: u = (wavelength - wavelengths[i]) / (wavelengths[i + 1] - wavelengths[i]) return wavelength_function[i] * (1.0 - u) + wavelength_function[i + 1] * u return wavelength_function[wavelength_function.size() - 1] # The returned constants are in lumen.nm / watt. def ComputeSpectralRadianceToLuminanceFactors(wavelengths, solar_irradiance, lambda_power): k_r = 0.0 k_g = 0.0 k_b = 0.0 solar_r = Interpolate(wavelengths, solar_irradiance, kLambdaR) solar_g = Interpolate(wavelengths, solar_irradiance, kLambdaG) solar_b = Interpolate(wavelengths, solar_irradiance, kLambdaB) dlambda = 1 for L in range(kLambdaMin, kLambdaMax, dlambda): x_bar = CieColorMatchingFunctionTableValue(L, 1) y_bar = CieColorMatchingFunctionTableValue(L, 2) z_bar = CieColorMatchingFunctionTableValue(L, 3) r_bar = XYZ_TO_SRGB[0] * x_bar + XYZ_TO_SRGB[1] * y_bar + XYZ_TO_SRGB[2] * z_bar g_bar = XYZ_TO_SRGB[3] * x_bar + XYZ_TO_SRGB[4] * y_bar + XYZ_TO_SRGB[5] * z_bar b_bar = XYZ_TO_SRGB[6] * x_bar + XYZ_TO_SRGB[7] * y_bar + XYZ_TO_SRGB[8] * z_bar irradiance = Interpolate(wavelengths, solar_irradiance, L) k_r += r_bar * irradiance / solar_r * pow(L / kLambdaR, lambda_power) k_g += g_bar * irradiance / solar_g * pow(L / kLambdaG, lambda_power) k_b += b_bar * irradiance / solar_b * pow(L / kLambdaB, lambda_power) k_r *= MAX_LUMINOUS_EFFICACY * dlambda k_g *= MAX_LUMINOUS_EFFICACY * dlambda k_b *= MAX_LUMINOUS_EFFICACY * dlambda return [k_r, k_g, k_b] def ConvertSpectrumToLinearSrgb(wavelengths, spectrum): x = 0.0 y = 0.0 z = 0.0 dlambda = 1 for L in range(kLambdaMin, kLambdaMax, dlambda): value = Interpolate(wavelengths, spectrum, L) x += CieColorMatchingFunctionTableValue(L, 1) * value y += CieColorMatchingFunctionTableValue(L, 2) * value z += CieColorMatchingFunctionTableValue(L, 3) * value r = MAX_LUMINOUS_EFFICACY * (XYZ_TO_SRGB[0] * x + XYZ_TO_SRGB[1] * y + XYZ_TO_SRGB[2] * z) * dlambda g = MAX_LUMINOUS_EFFICACY * (XYZ_TO_SRGB[3] * x + XYZ_TO_SRGB[4] * y + XYZ_TO_SRGB[5] * z) * dlambda b = MAX_LUMINOUS_EFFICACY * (XYZ_TO_SRGB[6] * x + XYZ_TO_SRGB[7] * y + XYZ_TO_SRGB[8] * z) * dlambda return r, g, b class DensityProfileLayer: def __init__(self, width=0.0, exp_term=0.0, exp_scale=0.0, linear_term=0.0, constant_term=0.0): self.width = width self.exp_term = exp_term self.exp_scale = exp_scale self.linear_term = linear_term self.constant_term = constant_term class Model: def __init__(self, wavelengths, solar_irradiance, sun_angular_radius, bottom_radius, top_radius, rayleigh_density, rayleigh_scattering, mie_density, 
mie_scattering, mie_extinction, mie_phase_function_g, absorption_density, absorption_extinction, ground_albedo, max_sun_zenith_angle, length_unit_in_meters, num_precomputed_wavelengths, precompute_illuminance, use_combined_textures): self.wavelengths = wavelengths self.solar_irradiance = solar_irradiance self.sun_angular_radius = sun_angular_radius self.bottom_radius = bottom_radius self.top_radius = top_radius self.rayleigh_density = rayleigh_density self.rayleigh_scattering = rayleigh_scattering self.mie_density = mie_density self.mie_scattering = mie_scattering self.mie_extinction = mie_extinction self.mie_phase_function_g = mie_phase_function_g self.absorption_density = absorption_density self.absorption_extinction = absorption_extinction self.ground_albedo = ground_albedo self.max_sun_zenith_angle = max_sun_zenith_angle self.length_unit_in_meters = length_unit_in_meters self.num_precomputed_wavelengths = num_precomputed_wavelengths self.precompute_illuminance = precompute_illuminance self.use_combined_textures = use_combined_textures self.material_instance_macros = { 'COMBINED_SCATTERING_TEXTURES': 1 if use_combined_textures else 0 } # Atmosphere shader code resource_manager = CoreManager.instance().resource_manager shaderLoader = resource_manager.shader_loader shader_name = 'precomputed_atmosphere.atmosphere_predefine' recompute_atmosphere_predefine = resource_manager.get_shader(shader_name) recompute_atmosphere_predefine.shader_code = self.glsl_header_factory([kLambdaR, kLambdaG, kLambdaB]) shaderLoader.save_resource(shader_name) shaderLoader.load_resource(shader_name) self.transmittance_texture = CreateTexture( name="precomputed_atmosphere.transmittance", texture_type=Texture2D, width=TRANSMITTANCE_TEXTURE_WIDTH, height=TRANSMITTANCE_TEXTURE_HEIGHT, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP_TO_EDGE ) self.scattering_texture = CreateTexture( name="precomputed_atmosphere.scattering", texture_type=Texture3D, width=SCATTERING_TEXTURE_WIDTH, height=SCATTERING_TEXTURE_HEIGHT, depth=SCATTERING_TEXTURE_DEPTH, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP_TO_EDGE ) self.irradiance_texture = CreateTexture( name="precomputed_atmosphere.irradiance", texture_type=Texture2D, width=IRRADIANCE_TEXTURE_WIDTH, height=IRRADIANCE_TEXTURE_HEIGHT, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.optional_single_mie_scattering_texture = None if not self.use_combined_textures: self.optional_single_mie_scattering_texture = CreateTexture( name="precomputed_atmosphere.optional_single_mie_scattering_texture", texture_type=Texture3D, width=SCATTERING_TEXTURE_WIDTH, height=SCATTERING_TEXTURE_HEIGHT, depth=SCATTERING_TEXTURE_DEPTH, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.delta_irradiance_texture = CreateTexture( name="precomputed_atmosphere.delta_irradiance_texture", texture_type=Texture2D, width=IRRADIANCE_TEXTURE_WIDTH, height=IRRADIANCE_TEXTURE_HEIGHT, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.delta_rayleigh_scattering_texture = CreateTexture( name="precomputed_atmosphere.delta_rayleigh_scattering_texture", texture_type=Texture3D, width=SCATTERING_TEXTURE_WIDTH, 
height=SCATTERING_TEXTURE_HEIGHT, depth=SCATTERING_TEXTURE_DEPTH, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.delta_mie_scattering_texture = CreateTexture( name="precomputed_atmosphere.delta_mie_scattering_texture", texture_type=Texture3D, width=SCATTERING_TEXTURE_WIDTH, height=SCATTERING_TEXTURE_HEIGHT, depth=SCATTERING_TEXTURE_DEPTH, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.delta_scattering_density_texture = CreateTexture( name="precomputed_atmosphere.delta_scattering_density_texture", texture_type=Texture3D, width=SCATTERING_TEXTURE_WIDTH, height=SCATTERING_TEXTURE_HEIGHT, depth=SCATTERING_TEXTURE_DEPTH, internal_format=GL_RGBA32F, texture_format=GL_RGBA, min_filter=GL_LINEAR, mag_filter=GL_LINEAR, data_type=GL_FLOAT, wrap=GL_CLAMP ) self.delta_multiple_scattering_texture = self.delta_rayleigh_scattering_texture self.quad = ScreenQuad.get_vertex_array_buffer() def glsl_header_factory(self, lambdas): def to_string(v, lambdas, scale): r = Interpolate(self.wavelengths, v, lambdas[0]) * scale g = Interpolate(self.wavelengths, v, lambdas[1]) * scale b = Interpolate(self.wavelengths, v, lambdas[2]) * scale return "vec3(%f, %f, %f)" % (r, g, b) def density_layer(layer): return "DensityProfileLayer(%f, %f, %f, %f, %f)" % (layer.width / self.length_unit_in_meters, layer.exp_term, layer.exp_scale * self.length_unit_in_meters, layer.linear_term * self.length_unit_in_meters, layer.constant_term) def density_profile(layers): kLayerCount = 2 while len(layers) < kLayerCount: layers.insert(0, DensityProfileLayer()) result = "DensityProfile(DensityProfileLayer[%d](" % kLayerCount for i in range(kLayerCount): result += density_layer(layers[i]) if i < kLayerCount - 1: result += "," else: result += "))" return result header = ["const int TRANSMITTANCE_TEXTURE_WIDTH = %d;" % TRANSMITTANCE_TEXTURE_WIDTH, "const int TRANSMITTANCE_TEXTURE_HEIGHT = %d;" % TRANSMITTANCE_TEXTURE_HEIGHT, "const int SCATTERING_TEXTURE_R_SIZE = %d;" % SCATTERING_TEXTURE_R_SIZE, "const int SCATTERING_TEXTURE_MU_SIZE = %d;" % SCATTERING_TEXTURE_MU_SIZE, "const int SCATTERING_TEXTURE_MU_S_SIZE = %d;" % SCATTERING_TEXTURE_MU_S_SIZE, "const int SCATTERING_TEXTURE_NU_SIZE = %d;" % SCATTERING_TEXTURE_NU_SIZE, "const int IRRADIANCE_TEXTURE_WIDTH = %d;" % IRRADIANCE_TEXTURE_WIDTH, "const int IRRADIANCE_TEXTURE_HEIGHT = %d;" % IRRADIANCE_TEXTURE_HEIGHT, "const vec2 IRRADIANCE_TEXTURE_SIZE = vec2(%d, %d);" % ( IRRADIANCE_TEXTURE_WIDTH, IRRADIANCE_TEXTURE_HEIGHT), "", '#include "precomputed_atmosphere/definitions.glsl"', "", "const AtmosphereParameters ATMOSPHERE = AtmosphereParameters(", to_string(self.solar_irradiance, lambdas, 1.0) + ",", str(self.sun_angular_radius) + ",", str(self.bottom_radius / self.length_unit_in_meters) + ",", str(self.top_radius / self.length_unit_in_meters) + ",", density_profile(self.rayleigh_density) + ",", to_string(self.rayleigh_scattering, lambdas, self.length_unit_in_meters) + ",", density_profile(self.mie_density) + ",", to_string(self.mie_scattering, lambdas, self.length_unit_in_meters) + ",", to_string(self.mie_extinction, lambdas, self.length_unit_in_meters) + ",", str(self.mie_phase_function_g) + ",", density_profile(self.absorption_density) + ",", to_string(self.absorption_extinction, lambdas, self.length_unit_in_meters) + ",", to_string(self.ground_albedo, lambdas, 1.0) + ",", str(cos(self.max_sun_zenith_angle)) + 
");", ""] return "\n".join(header) def generate(self, num_scattering_orders=4): resource_manager = CoreManager.instance().resource_manager framebuffer_manager = CoreManager.instance().renderer.framebuffer_manager if not self.precompute_illuminance: lambdas = [kLambdaR, kLambdaG, kLambdaB] luminance_from_radiance = Matrix3() self.Precompute(lambdas, luminance_from_radiance, False, num_scattering_orders) else: num_iterations = (self.num_precomputed_wavelengths + 2) / 3 dlambda = (kLambdaMax - kLambdaMin) / (3 * num_iterations) def coeff(L, component): x = CieColorMatchingFunctionTableValue(L, 1) y = CieColorMatchingFunctionTableValue(L, 2) z = CieColorMatchingFunctionTableValue(L, 3) return (XYZ_TO_SRGB[component * 3] * x + XYZ_TO_SRGB[component * 3 + 1] * y + XYZ_TO_SRGB[component * 3 + 2] * z) * dlambda for i in range(int(num_iterations)): lambdas = [kLambdaMin + (3 * i + 0.5) * dlambda, kLambdaMin + (3 * i + 1.5) * dlambda, kLambdaMin + (3 * i + 2.5) * dlambda] luminance_from_radiance = Matrix3() luminance_from_radiance[0] = [coeff(lambdas[0], 0), coeff(lambdas[1], 0), coeff(lambdas[2], 0)] luminance_from_radiance[1] = [coeff(lambdas[0], 1), coeff(lambdas[1], 1), coeff(lambdas[2], 1)] luminance_from_radiance[2] = [coeff(lambdas[0], 2), coeff(lambdas[1], 2), coeff(lambdas[2], 2)] self.Precompute(lambdas, luminance_from_radiance, 0 < i, num_scattering_orders) # Note : recompute compute_transmittance framebuffer_manager.bind_framebuffer(self.transmittance_texture) recompute_transmittance_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.recompute_transmittance', macros=self.material_instance_macros) recompute_transmittance_mi.use_program() self.quad.draw_elements() # save textures def save_texture(texture): resource = resource_manager.texture_loader.get_resource(texture.name) if resource is None: resource = resource_manager.texture_loader.create_resource(texture.name, texture) else: old_texture = resource.get_data() old_texture.delete() resource.set_data(texture) resource_manager.texture_loader.save_resource(resource.name) # precomputed textures save_texture(self.transmittance_texture) save_texture(self.scattering_texture) save_texture(self.irradiance_texture) # intermediate processing textures # save_texture(self.delta_irradiance_texture) # save_texture(self.delta_rayleigh_scattering_texture) # save_texture(self.delta_mie_scattering_texture) # save_texture(self.delta_scattering_density_texture) # if not self.use_combined_textures: # save_texture(self.optional_single_mie_scattering_texture) def Precompute(self, lambdas, luminance_from_radiance, blend, num_scattering_orders): resource_manager = CoreManager.instance().resource_manager framebuffer_manager = CoreManager.instance().renderer.framebuffer_manager shaderLoader = resource_manager.shader_loader shader_name = 'precomputed_atmosphere.compute_atmosphere_predefine' compute_atmosphere_predefine = resource_manager.get_shader(shader_name) compute_atmosphere_predefine.shader_code = self.glsl_header_factory(lambdas) shaderLoader.save_resource(shader_name) shaderLoader.load_resource(shader_name) glEnable(GL_BLEND) glBlendEquation(GL_FUNC_ADD) glBlendFunc(GL_ONE, GL_ONE) # compute_transmittance framebuffer_manager.bind_framebuffer(self.transmittance_texture) glDisablei(GL_BLEND, 0) compute_transmittance_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_transmittance', macros=self.material_instance_macros) compute_transmittance_mi.use_program() self.quad.draw_elements() # compute_direct_irradiance 
framebuffer_manager.bind_framebuffer(self.delta_irradiance_texture, self.irradiance_texture) glDisablei(GL_BLEND, 0) if blend: glEnablei(GL_BLEND, 1) else: glDisablei(GL_BLEND, 1) compute_direct_irradiance_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_direct_irradiance', macros=self.material_instance_macros) compute_direct_irradiance_mi.use_program() compute_direct_irradiance_mi.bind_uniform_data('transmittance_texture', self.transmittance_texture) self.quad.draw_elements() # compute_single_scattering compute_single_scattering_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_single_scattering', macros=self.material_instance_macros) compute_single_scattering_mi.use_program() compute_single_scattering_mi.bind_uniform_data('luminance_from_radiance', luminance_from_radiance) compute_single_scattering_mi.bind_uniform_data('transmittance_texture', self.transmittance_texture) glDisablei(GL_BLEND, 0) glDisablei(GL_BLEND, 1) if blend: glEnablei(GL_BLEND, 2) glEnablei(GL_BLEND, 3) else: glDisablei(GL_BLEND, 2) glDisablei(GL_BLEND, 3) for layer in range(SCATTERING_TEXTURE_DEPTH): if self.optional_single_mie_scattering_texture is None: framebuffer_manager.bind_framebuffer(self.delta_rayleigh_scattering_texture, self.delta_mie_scattering_texture, self.scattering_texture, target_layer=layer) else: framebuffer_manager.bind_framebuffer(self.delta_rayleigh_scattering_texture, self.delta_mie_scattering_texture, self.scattering_texture, self.optional_single_mie_scattering_texture, target_layer=layer) compute_single_scattering_mi.bind_uniform_data("layer", layer) self.quad.draw_elements() for scattering_order in range(2, num_scattering_orders + 1): # compute_scattering_density glDisablei(GL_BLEND, 0) compute_scattering_density_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_scattering_density', macros=self.material_instance_macros ) compute_scattering_density_mi.use_program() compute_scattering_density_mi.bind_uniform_data('transmittance_texture', self.transmittance_texture) compute_scattering_density_mi.bind_uniform_data('single_rayleigh_scattering_texture', self.delta_rayleigh_scattering_texture) compute_scattering_density_mi.bind_uniform_data('single_mie_scattering_texture', self.delta_mie_scattering_texture) compute_scattering_density_mi.bind_uniform_data('multiple_scattering_texture', self.delta_multiple_scattering_texture) compute_scattering_density_mi.bind_uniform_data('irradiance_texture', self.delta_irradiance_texture) compute_scattering_density_mi.bind_uniform_data('scattering_order', scattering_order) for layer in range(SCATTERING_TEXTURE_DEPTH): framebuffer_manager.bind_framebuffer(self.delta_scattering_density_texture, target_layer=layer) compute_scattering_density_mi.bind_uniform_data('layer', layer) self.quad.draw_elements() # compute_indirect_irradiance framebuffer_manager.bind_framebuffer(self.delta_irradiance_texture, self.irradiance_texture) glDisablei(GL_BLEND, 0) glEnablei(GL_BLEND, 1) compute_indirect_irradiance_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_indirect_irradiance', macros=self.material_instance_macros ) compute_indirect_irradiance_mi.use_program() compute_indirect_irradiance_mi.bind_uniform_data('luminance_from_radiance', luminance_from_radiance) compute_indirect_irradiance_mi.bind_uniform_data('scattering_order', scattering_order - 1) compute_indirect_irradiance_mi.bind_uniform_data('single_rayleigh_scattering_texture', 
self.delta_rayleigh_scattering_texture) compute_indirect_irradiance_mi.bind_uniform_data('single_mie_scattering_texture', self.delta_mie_scattering_texture) compute_indirect_irradiance_mi.bind_uniform_data('multiple_scattering_texture', self.delta_multiple_scattering_texture) self.quad.draw_elements() # compute_multiple_scattering glDisablei(GL_BLEND, 0) glEnablei(GL_BLEND, 1) compute_multiple_scattering_mi = resource_manager.get_material_instance( 'precomputed_atmosphere.compute_multiple_scattering', macros=self.material_instance_macros ) compute_multiple_scattering_mi.use_program() compute_multiple_scattering_mi.bind_uniform_data('luminance_from_radiance', luminance_from_radiance) compute_multiple_scattering_mi.bind_uniform_data('transmittance_texture', self.transmittance_texture) compute_multiple_scattering_mi.bind_uniform_data('scattering_density_texture', self.delta_scattering_density_texture) for layer in range(SCATTERING_TEXTURE_DEPTH): framebuffer_manager.bind_framebuffer(self.delta_multiple_scattering_texture, self.scattering_texture, target_layer=layer) compute_multiple_scattering_mi.bind_uniform_data('layer', layer) self.quad.draw_elements()
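# A minimal usage sketch (not part of the original module) for the standalone
# spectral helpers defined above, using a flat toy spectrum instead of real
# solar data; the 10 nm sample spacing is an illustrative assumption.
def _example_spectrum_helpers():
    wavelengths = [float(w) for w in range(kLambdaMin, kLambdaMax + 1, 10)]
    flat_spectrum = [1.0] * len(wavelengths)
    # Sample the tabulated function at an arbitrary wavelength (in nm).
    value_550 = Interpolate(wavelengths, flat_spectrum, 550.0)
    # Integrate the spectrum against the CIE color matching functions and
    # convert to linear sRGB (scaled by MAX_LUMINOUS_EFFICACY).
    r, g, b = ConvertSpectrumToLinearSrgb(wavelengths, flat_spectrum)
    return value_550, (r, g, b)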
#!/usr/bin/python # ############################################################################ # # NetJobs - a network job scheduler. # # # # Author: Ramon A. Lovato (ramonalovato.com) # # For: Deepstorage, LLC (deepstorage.net) # # Version: 0.1 # # # # Usage: NetJobs.py [OPTIONS] [PATH] # # OPTIONS # # -h Display help message. # # -s Run in simulator mode (disables networking). # # -v Run in verbose mode. # # PATH # # Relative or absolute path to configuration file (required). # # # # Example: NetJobs.py -v "C:\NetJobs\testconfig.txt" # # ############################################################################ # import sys import re import socket import threading from collections import deque # ############################################################################ # # Constants and global variables. # # ############################################################################ # ARGC_MAX = 3 ARGS_REGEX = '\-[hsv]+' DELIMETER = ': *' NUM_TESTS_REGEX = '^tests *: *\d+\s*$' TEST_LABEL_REGEX = '^\w+ *:\s*$' TEST_SPEC_REGEX = '^(\w|\.)+ *: *.*\s*$' TEST_TIMEOUT_REGEX = '^\-timeout *: *((\d+)|(none))\s*$' TEST_MIN_HOSTS_REGEX = '^\-minhosts *: *(\d+|all)\s*$' TEST_END_REGEX = '^end\s*$' TIMEOUT_NONE = 0 MIN_HOSTS_ALL = -1 AGENT_LISTEN_PORT = 16192 BUFFER_SIZE = 4096 START_STRING = 'GO' SUCCESS_STRING = 'SUCCESS' ERROR_STRING = 'ERROR' global stdscr global verbose verbose = False global simulate simulate = False # ############################################################################ # # NetJobs class. # # ############################################################################ # class NetJobs: "NetJobs main class" # Instance variables. global verbose path_in = '' tests = [] sockets = {} listeners = {} results = {} # # Initializer. # def __init__(self, argv): "basic initializer" # Process CLI arguments. self.eval_options(argv) if verbose == True: print('Setup...') print(' "%s" given as configuration file path.' % (self.path_in)) # Parse configuration file. self.parse_config() # # Evaluate CLI arguments. # def eval_options(self, argv): "evaluate CLI arguments and act on them" argc = len(argv) sample = re.compile(ARGS_REGEX) if argc > ARGC_MAX: terminate() elif argc == 1: self.path_in = ask_for_path() elif argc == 2: if not sample.match(argv[1]) == None: self.act_on_options(argv[1]) self.path_in = ask_for_path() else: self.path_in = argv[1] elif argc == ARGC_MAX: if (sample.match(argv[1]) == None or not sample.match(argv[2]) == None): terminate() else: self.act_on_options(argv[1]) self.path_in = argv[2] # # Process optional CLI arguments. Called as part of eval_options. # def act_on_options(self, args): "act on CLI arguments" if 'h' in args: instructions() if 's' in args: global simulate simulate = True if 'v' in args: global verbose verbose = True print('\nVerbose logging enabled.\n') # # Parse input file. # def parse_config(self): "configure the run according to the configuration file specifications" global verbose numTestsRegex = re.compile(NUM_TESTS_REGEX) testLabelRegex = re.compile(TEST_LABEL_REGEX) testSpecRegex = re.compile(TEST_SPEC_REGEX) testTimeoutRegex = re.compile(TEST_TIMEOUT_REGEX) testMinHostsRegex = re.compile(TEST_MIN_HOSTS_REGEX) testEndRegex = re.compile(TEST_END_REGEX) numTests = -1 try: with open(self.path_in, 'r', newline='') as file: # Filter out empty lines. lines = deque(filter(None, (line.rstrip() for line in file))) # Expect first line to give number of tests and timeout type/time. 
line = lines.popleft() tokens = line.split() if numTestsRegex.match(line) == None: sys.exit('file %s: first line must be of form '\ '"tests: x", for x > 0' % self.path_in) else: try: numTests = int(tokens[1]) except ValueError: sys.exit('file %s: test count must be integer > 0' % self.path_in) if verbose: print(' Test count: %d' % numTests) # Dictionary for test specs. Attaches to testConfig object. specs = {} # Read rest of file. while lines: inTestBlock = False # Get test name. line = lines.popleft() tokens = re.split(DELIMETER, line) testName = '' if testLabelRegex.match(line) == None: sys.exit('file %s: expected test label but found '\ '"%s"' % (self.path_in, line)) else: inTestBlock = True timeout = TIMEOUT_NONE minHosts = MIN_HOSTS_ALL testLabel = tokens[0] # Get test specifications. Go until next test label line. while inTestBlock: # Reached end of file without closing block. if not lines: sys.exit('file %s: test "%s" - no end marker' % (self.path_in, testName)) line = lines.popleft() tokens = re.split(DELIMETER, line) # Is it a spec line? if testSpecRegex.match(line): target = tokens[0] command = tokens[1] # Add test spec to specs dictionary. specs[target] = command # Is it a timeout line? elif testTimeoutRegex.match(line): if tokens[1] == 'none': timeout = TIMEOUT_NONE else: try: timeout = int(tokens[1]) except ValueError: sys.exit('file %s: timeout specification '\ 'must be "complete" or integer > 0 '\ '(in secs)' % self.path_in) # Is it a minhosts line? elif testMinHostsRegex.match(line): if tokens[1] == 'all': minHosts = MIN_HOSTS_ALL else: try: minHosts = int(tokens[1]) except ValueError: sys.exit('file %s: minhosts specification '\ 'must be "all" or integer > 0 ' % self.path_in) # Is it an end marker? elif testEndRegex.match(line): inTestBlock = False # Add the test configuration to the list. self.tests.append(TestConfig(testLabel, timeout, specs, minHosts)) # Else unknown. else: sys.exit('file %s: unable to interpret line "%s"' % (self.path_in, line)) # Catch IOError exception and exit. except IOError as e: sys.exit('file %s: %s' % (self.path_in, e)) # # Prepare remote agents. # def prepAgents(self, test): if verbose: print(' Preparing agents...') targets = list(test.specs.keys()) for target in targets: # Create TCP socket. Skip if in simulation mode. if not simulate: try: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) except socket.error as e: sys.exit('Failed to create socket for target "%s": %s.' % (target, e)) # Bind socket to local machine name and specified port. try: hostname = socket.gethostname() port = AGENT_LISTEN_PORT sock.connect((hostname, port)) # Perform a simple echo test to make sure it works. testBytes = bytes('hello, ' + target, 'UTF-8') sock.sendall(testBytes) response = sock.recv(BUFFER_SIZE) if response == testBytes: sock.sendall(bytes(test.specs[target], 'UTF-8')) self.sockets[target] = sock else: # TODO print('Agent %d failed echo test. Skipping.' % target) except socket.error as e: sys.exit('Failed to open connection to socket for target '\ '"%s": %s.' % (target, e)) if verbose: print(' ...finished.\n') # # Start remote agents. # def startAgents(self, test): if verbose: print(' Starting agents...') for target in list(self.sockets.keys()): socket = self.sockets[target] # Start a ListenThread to wait for results. listener = ListenThread(target, socket, self.results) self.listeners[target] = listener listener.start() # Send the start command. 
socket.sendall(bytes(START_STRING, 'UTF-8')) if verbose: print(' ...finished.\n') # # Start remote agents. # def waitForResults(self, test): if verbose: print(' Waiting for agent results...') for listener in self.listeners.values(): listener.join(timeout=None) if verbose: print(' ...finished.\n') # # Clean up after test. # def cleanUp(self, test): if verbose: print(' Cleaning up...') for socket in list(self.sockets.values()): socket.close() if verbose: print(' ...finished.\n') # # Print final results. # def printResults(self, test): if verbose: print(' Final results...') print() for target in self.results.keys(): result = self.results[target] print(' %s : %s' % (target, str(result))) print() if verbose: print(' ...finished.\n') # # Start. # def start(self): "begin execution" if verbose: print('\nStarting run...\n') for test in self.tests: if verbose: print(' %s...' % test.label) # Prepare remote agents. self.prepAgents(test) # Start remote agents. self.startAgents(test) # Wait for remote agent return status. self.waitForResults(test) # Clean up. self.cleanUp(test) # Print results. self.printResults(test) if verbose: print('\nFinishing...\n') # ############################################################################ # # TestConfig class for storing test configurations. # # ############################################################################ # class TestConfig: "data structure class for storing test configurations" def __init__(self, label, timeout, specs, minHosts): "basic initializer" self.label = label self.timeout = timeout self.specs = specs self.minHosts = minHosts # ############################################################################ # # ListenThread class for listening for test results. # # ############################################################################ # class ListenThread(threading.Thread): "listens for test results for a given agent" def __init__(self, target, socket, results): threading.Thread.__init__(self) self.target = target self.socket = socket self.results = results def run(self): print('run started') while True: # Wait for result to be transmitted from agent. buffer = self.socket.recv(BUFFER_SIZE) if buffer: status = buffer.decode('UTF-8') self.results[self.target] = status break # ############################################################################ # # Functions. # # ############################################################################ # # # Ask the user to provide the config file path. # def ask_for_path(): return input('Please enter the configuration file path: ') # # Print CLI usage instructions. # def instructions(): "print usage instructions" print() print(r'Usage: NetJobs.py [OPTIONS] [PATH]') print(r'OPTIONS') print(r' -h Display this message.') print(r' -s Run in simulator mode (disables networking).') print(r' -v Run in verbose mode.') print(r'PATH') print(r' Relative or absolute path to source file (required).') print() print(r'NetJobs.py -v "C:\NetJobs\testconfig.txt"') print() # # Exit with error and print instructions. # def terminate(): "terminate with error and print instructions" instructions() sys.exit(1) # # Main. # def main(): "main function" global verbose # Create NetJobs object to handle the work. jobs = NetJobs(sys.argv) # Run. jobs.start() # Finish. if verbose: print('All jobs completed successfully.') exit(0) # ############################################################################ # # Execute main. 
# # ############################################################################ # if __name__ == "__main__": main()
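# An illustrative configuration file (not part of the original script), shaped
# to match the regexes consumed by parse_config(); the host name and command
# are assumptions. A test block opens with a label line and closes with "end":
#
#   tests: 1
#   demo_test:
#   host1.example.com: echo hello
#   -timeout: 30
#   -minhosts: all
#   end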
#!/usr/bin/env python import os from copy import deepcopy import numpy as np from numpy.testing import assert_array_equal import gippy as gp import unittest import gippy.test as gpt class GeoImageTests(unittest.TestCase): prefix = 'test-' def setUp(self): """ Configure options """ gp.Options.set_verbose(1) gp.Options.set_chunksize(4.0) def test0_open(self): """ Open existing image """ geoimg = gpt.get_test_image() self.assertEqual(geoimg.xsize(), 627) self.assertEqual(geoimg.ysize(), 603) def test1_create(self): """ Create single band image """ fout = 'test.tif' geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000) self.assertTrue(geoimg.xsize() == 1000) self.assertTrue(geoimg.ysize() == 1000) self.assertTrue(os.path.exists(fout)) # test resolution res = geoimg.resolution() self.assertEqual(res.x(), 1.0/geoimg.xsize()) self.assertEqual(res.y(), -1.0/geoimg.ysize()) os.remove(fout) def test_read(self): """ Read multiband image """ geoimg = gpt.get_test_image() arr = geoimg.read() self.assertEqual(geoimg.nbands(), arr.shape[0]) # make sure x, y dimensions are same when reading single bands self.assertEqual(arr.shape[1:3], geoimg[0].read().shape) def test_read_random_pixels(self): """ Read random pixels """ geoimg = gpt.get_test_image() arr = geoimg.read_random_pixels(1000) def test_uint16_read(self): """ read uint16 makes uint16 array """ fout = 'test.tif' geoimg = gp.GeoImage.create(fout, dtype='uint16') self.assertTrue(os.path.exists(fout)) arr = geoimg.read() self.assertEqual(str(arr.dtype), geoimg.type().string()) os.remove(fout) def test_loop_through_bands(self): """ Check that GeoImage is iterable """ geoimg = gpt.get_test_image() for band in geoimg: self.assertEqual(band.xsize(), geoimg.xsize()) def test_select(self): """ Selection of bands from GeoImage """ img1 = gpt.get_test_image() img2 = img1.select(['red', 'green', 'blue']) self.assertTrue(np.array_equal(img1['red'].read(), img2[0].read())) self.assertTrue(np.array_equal(img1['green'].read(), img2[1].read())) self.assertTrue(np.array_equal(img1['blue'].read(), img2[2].read())) def test_persistent_metadata(self): """ Writing metadata and check for persistence after reopening """ fout = 'test-meta.tif' geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=3) geoimg.set_bandnames(['red', 'green', 'blue']) geoimg.set_nodata(7) self.assertEqual(geoimg.bandnames()[0], 'red') geoimg = None # reopen geoimg = gp.GeoImage(fout) self.assertEqual(geoimg[0].nodata(), 7) self.assertEqual(list(geoimg.bandnames()), ['red', 'green', 'blue']) geoimg = None os.remove(fout) def test_create_image_with_gain(self): """ Create int image with floating point gain """ fout = 'test-gain.tif' geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, dtype='int16') geoimg.set_gain(0.0001) arr = np.zeros((1000,1000)) + 0.0001 arr[0:500,:] = 0.0002 geoimg[0].write(deepcopy(arr)) arrout = geoimg[0].read() np.testing.assert_array_equal(arr, arrout) os.remove(fout) def test_create_multiband(self): """ Create an RGB image """ fout = 'test_3band.tif' geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=3) geoimg.set_bandnames(['green', 'red', 'blue']) # test selection of bands geoimg2 = geoimg.select(["red"]) self.assertTrue(geoimg2.nbands() == 1) self.assertTrue(geoimg["red"].description() == "red") geoimg = None geoimg2 = None os.remove(fout) def test_create_temp_file(self): """ Create a temp file that is deleted when last reference gone """ fout = self.prefix + '_temp.tif' geoimg = gp.GeoImage.create(fout, xsz=1000, ysz=1000, nb=5, temp=True) 
self.assertTrue(os.path.exists(fout)) # keep a band band = geoimg[1] geoimg = None # band still references file self.assertTrue(os.path.exists(fout)) band = None # file should now have been deleted self.assertFalse(os.path.exists(fout)) def test_create_autoname_temp(self): """ Create temp file with auto-generated filename """ geoimg = gp.GeoImage.create(xsz=1000, ysz=1000, nb=3) fout = geoimg.filename() self.assertTrue(os.path.exists(fout)) geoimg = None self.assertFalse(os.path.exists(fout)) def test_autoscale(self): """ Auto scale each band in image """ geoimg = gpt.get_test_image() for band in geoimg: self.assertTrue(band.min() != 1.0) self.assertTrue(band.max() != 255.0) geoimg2 = geoimg.autoscale(minout=1.0, maxout=255.0) for band in geoimg2: self.assertTrue(band.min() == 1) self.assertTrue(band.max() == 255) def test_overviews(self): """ Add overviews to an image """ fout = 'test-overviews.tif' geoimg = gp.GeoImage.create(filename=fout, xsz=1000, ysz=1000, nb=2) fout = geoimg.filename() # add overviews geoimg.add_overviews() # clear overviews geoimg.add_overviews(levels=[]) self.assertFalse(os.path.exists(fout + '.ovr')) geoimg = None geoimg = gp.GeoImage(fout, False) geoimg.add_overviews() self.assertTrue(os.path.exists(fout + '.ovr')) os.remove(fout) os.remove(fout + '.ovr') def test_save(self): """ Save image as new image with different datatype """ fout = 'test-byte.tif' geoimg = gpt.get_test_image().autoscale(1.0, 255.0).save(fout, 'uint8') geoimg = None geoimg = gp.GeoImage(fout) self.assertEqual(geoimg.type().string(), 'uint8') self.assertEqual(geoimg[0].min(), 1.0) self.assertEqual(geoimg[0].max(), 255.0) os.remove(fout) def test_save_with_gain(self): """ Save image with a gain, which should copy through """ geoimg = gpt.get_test_image().select([2]) geoimg.set_gain(0.0001) fout = 'test-savegain.tif' imgout = geoimg.save(fout) assert_array_equal(imgout.read(), geoimg.read()) os.remove(fout) def test_warp(self): """ Warping image into another (blank) image """ bbox = np.array([0.0, 0.0, 1.0, 1.0]) # default image in EPSG:4326 that spans 1 degree geoimg = gp.GeoImage.create(xsz=1000, ysz=1000, nb=3, proj='EPSG:4326', bbox=bbox) # 3857, set resolution to 100 meters imgout = geoimg.warp(proj='EPSG:3857', xres=100.0, yres=100.0) self.assertTrue(os.path.exists(imgout.filename())) self.assertEqual(imgout.xsize(), 1114) self.assertEqual(imgout.ysize(), 1114) self.assertAlmostEqual(np.ceil(imgout.resolution().x()), 100.0) def test_real_warp(self): """ Warp real image to another projection """ geoimg = gpt.get_test_image() fout = 'test-realwarp.tif' imgout = geoimg.warp(fout, proj='EPSG:4326', xres=0.0003, yres=0.0003) self.assertEqual(imgout.xsize(), 653) self.assertEqual(imgout.ysize(), 547) os.remove(fout) def test_warp_into(self): """ Warp real image into an existing image """ geoimg = gpt.get_test_image().select([1]) ext = geoimg.extent() bbox = np.array([ext.x0(), ext.y0(), ext.width(), ext.height()]) imgout = gp.GeoImage.create('', geoimg.xsize(), geoimg.ysize(), 1, geoimg.srs(), bbox, geoimg.type().string()); geoimg.warp_into(imgout) self.assertEqual(imgout.read().sum(), geoimg.read().sum())
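# A minimal usage sketch (not part of the original test module): round-trip a
# small array through a temporary single-band image, mirroring the create /
# write / read calls exercised above. The size, dtype, and the combination of
# keyword arguments are illustrative assumptions.
def _example_roundtrip():
    arr = np.arange(256, dtype='uint8').reshape(16, 16)
    geoimg = gp.GeoImage.create(xsz=16, ysz=16, nb=1, dtype='uint8', temp=True)
    geoimg[0].write(arr)
    return np.array_equal(arr, geoimg[0].read())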
# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2012 Josh Durgin # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import contextlib import mock import mox import os import tempfile from cinder import db from cinder import exception from cinder.image import image_utils from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils from cinder import test from cinder.tests.backup.fake_rados import mock_rados from cinder.tests.backup.fake_rados import mock_rbd from cinder.tests.image import fake as fake_image from cinder.tests.test_volume import DriverTestCase from cinder import units from cinder.volume import configuration as conf import cinder.volume.drivers.rbd as driver LOG = logging.getLogger(__name__) CEPH_MON_DUMP = """dumped monmap epoch 1 { "epoch": 1, "fsid": "33630410-6d93-4d66-8e42-3b953cf194aa", "modified": "2013-05-22 17:44:56.343618", "created": "2013-05-22 17:44:56.343618", "mons": [ { "rank": 0, "name": "a", "addr": "[::1]:6789\/0"}, { "rank": 1, "name": "b", "addr": "[::1]:6790\/0"}, { "rank": 2, "name": "c", "addr": "[::1]:6791\/0"}, { "rank": 3, "name": "d", "addr": "127.0.0.1:6792\/0"}, { "rank": 4, "name": "e", "addr": "example.com:6791\/0"}], "quorum": [ 0, 1, 2]} """ class FakeImageService: def download(self, context, image_id, path): pass class TestUtil(test.TestCase): def test_ascii_str(self): self.assertEqual(None, driver.ascii_str(None)) self.assertEqual('foo', driver.ascii_str('foo')) self.assertEqual('foo', driver.ascii_str(u'foo')) self.assertRaises(UnicodeEncodeError, driver.ascii_str, 'foo' + unichr(300)) class RBDTestCase(test.TestCase): def setUp(self): super(RBDTestCase, self).setUp() def fake_execute(*args, **kwargs): return '', '' self.configuration = mox.MockObject(conf.Configuration) self.configuration.volume_tmp_dir = None self.configuration.rbd_pool = 'rbd' self.configuration.rbd_ceph_conf = None self.configuration.rbd_secret_uuid = None self.configuration.rbd_user = None self.configuration.append_config_values(mox.IgnoreArg()) self.rados = self.mox.CreateMockAnything() self.rbd = self.mox.CreateMockAnything() self.driver = driver.RBDDriver(execute=fake_execute, configuration=self.configuration, rados=self.rados, rbd=self.rbd) self.driver.set_initialized() def tearDown(self): super(RBDTestCase, self).tearDown() def test_create_volume(self): name = u'volume-00000001' size = 1 volume = dict(name=name, size=size) mock_client = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RADOSClient') driver.RADOSClient(self.driver).AndReturn(mock_client) mock_client.__enter__().AndReturn(mock_client) self.rbd.RBD_FEATURE_LAYERING = 1 _mock_rbd = self.mox.CreateMockAnything() self.rbd.RBD().AndReturn(_mock_rbd) _mock_rbd.create(mox.IgnoreArg(), str(name), size * 1024 ** 3, old_format=False, features=self.rbd.RBD_FEATURE_LAYERING) mock_client.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() self.driver.create_volume(volume) @mock.patch('cinder.volume.drivers.rbd.rados') 
@mock.patch('cinder.volume.drivers.rbd.rbd') def test_delete_volume(self, _mock_rbd, _mock_rados): name = u'volume-00000001' volume = dict(name=name) _mock_rbd.Image = mock_rbd.Image _mock_rbd.Image.list_snaps = mock.Mock() _mock_rbd.Image.list_snaps.return_value = [] _mock_rbd.Image.unprotect_snap = mock.Mock() _mock_rbd.RBD = mock_rbd.RBD _mock_rbd.RBD.remove = mock.Mock() self.driver.rbd = _mock_rbd self.driver.rados = _mock_rados mpo = mock.patch.object with mpo(driver, 'RADOSClient') as mock_rados_client: with mpo(self.driver, '_get_clone_info') as mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mpo(self.driver, '_delete_backup_snaps') as mock_del_backup_snaps: self.driver.delete_volume(volume) self.assertTrue(mock_get_clone_info.called) self.assertTrue(_mock_rbd.Image.list_snaps.called) self.assertTrue(mock_rados_client.called) self.assertTrue(mock_del_backup_snaps.called) self.assertFalse(mock_rbd.Image.unprotect_snap.called) self.assertTrue(_mock_rbd.RBD.remove.called) @mock.patch('cinder.volume.drivers.rbd.rados') @mock.patch('cinder.volume.drivers.rbd.rbd') def test_delete_busy_volume(self, _mock_rbd, _mock_rados): name = u'volume-00000001' volume = dict(name=name) _mock_rbd.Image = mock_rbd.Image _mock_rbd.Image.list_snaps = mock.Mock() _mock_rbd.Image.list_snaps.return_value = [] _mock_rbd.Image.unprotect_snap = mock.Mock() class MyMockException(Exception): pass _mock_rbd.RBD = mock_rbd.RBD _mock_rbd.ImageBusy = MyMockException _mock_rbd.RBD.remove = mock.Mock() _mock_rbd.RBD.remove.side_effect = _mock_rbd.ImageBusy self.driver.rbd = _mock_rbd self.driver.rados = _mock_rados mpo = mock.patch.object with mpo(driver, 'RADOSClient') as mock_rados_client: with mpo(self.driver, '_get_clone_info') as mock_get_clone_info: mock_get_clone_info.return_value = (None, None, None) with mpo(self.driver, '_delete_backup_snaps') as mock_del_backup_snaps: self.assertRaises(exception.VolumeIsBusy, self.driver.delete_volume, volume) self.assertTrue(mock_get_clone_info.called) self.assertTrue(_mock_rbd.Image.list_snaps.called) self.assertTrue(mock_rados_client.called) self.assertTrue(mock_del_backup_snaps.called) self.assertFalse(mock_rbd.Image.unprotect_snap.called) self.assertTrue(_mock_rbd.RBD.remove.called) def test_create_snapshot(self): vol_name = u'volume-00000001' snap_name = u'snapshot-name' snapshot = dict(volume_name=vol_name, name=snap_name) mock_proxy = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RBDVolumeProxy') driver.RBDVolumeProxy(self.driver, vol_name) \ .AndReturn(mock_proxy) mock_proxy.__enter__().AndReturn(mock_proxy) mock_proxy.create_snap(str(snap_name)) self.rbd.RBD_FEATURE_LAYERING = 1 mock_proxy.protect_snap(str(snap_name)) mock_proxy.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() self.driver.create_snapshot(snapshot) def test_delete_snapshot(self): vol_name = u'volume-00000001' snap_name = u'snapshot-name' snapshot = dict(volume_name=vol_name, name=snap_name) mock_proxy = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RBDVolumeProxy') driver.RBDVolumeProxy(self.driver, vol_name) \ .AndReturn(mock_proxy) mock_proxy.__enter__().AndReturn(mock_proxy) self.rbd.RBD_FEATURE_LAYERING = 1 mock_proxy.unprotect_snap(str(snap_name)) mock_proxy.remove_snap(str(snap_name)) mock_proxy.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() self.driver.delete_snapshot(snapshot) def test_create_cloned_volume(self): src_name = u'volume-00000001' dst_name = u'volume-00000002' # Setup librbd stubs 
self.stubs.Set(self.driver, 'rados', mock_rados) self.stubs.Set(self.driver, 'rbd', mock_rbd) self.driver.rbd.RBD_FEATURE_LAYERING = 1 class mock_client(object): def __init__(self, *args, **kwargs): self.ioctx = None def __enter__(self, *args, **kwargs): return self def __exit__(self, type_, value, traceback): pass self.stubs.Set(driver, 'RADOSClient', mock_client) def mock_clone(*args, **kwargs): pass self.stubs.Set(self.driver.rbd.RBD, 'clone', mock_clone) self.stubs.Set(self.driver.rbd.Image, 'list_snaps', lambda *args: [{'name': 'snap1'}, {'name': 'snap2'}]) self.stubs.Set(self.driver.rbd.Image, 'parent_info', lambda *args: (None, None, None)) self.stubs.Set(self.driver.rbd.Image, 'protect_snap', lambda *args: None) self.driver.create_cloned_volume(dict(name=dst_name), dict(name=src_name)) def test_good_locations(self): locations = ['rbd://fsid/pool/image/snap', 'rbd://%2F/%2F/%2F/%2F', ] map(self.driver._parse_location, locations) def test_bad_locations(self): locations = ['rbd://image', 'http://path/to/somewhere/else', 'rbd://image/extra', 'rbd://image/', 'rbd://fsid/pool/image/', 'rbd://fsid/pool/image/snap/', 'rbd://///', ] for loc in locations: self.assertRaises(exception.ImageUnacceptable, self.driver._parse_location, loc) self.assertFalse(self.driver._is_cloneable(loc)) def test_cloneable(self): self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') location = 'rbd://abc/pool/image/snap' mock_proxy = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RBDVolumeProxy') driver.RBDVolumeProxy(self.driver, 'image', pool='pool', snapshot='snap', read_only=True).AndReturn(mock_proxy) mock_proxy.__enter__().AndReturn(mock_proxy) mock_proxy.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() self.assertTrue(self.driver._is_cloneable(location)) def test_uncloneable_different_fsid(self): self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') location = 'rbd://def/pool/image/snap' self.assertFalse(self.driver._is_cloneable(location)) def test_uncloneable_unreadable(self): self.stubs.Set(self.driver, '_get_fsid', lambda: 'abc') location = 'rbd://abc/pool/image/snap' self.stubs.Set(self.rbd, 'Error', test.TestingException) self.mox.StubOutWithMock(driver, 'RBDVolumeProxy') driver.RBDVolumeProxy(self.driver, 'image', pool='pool', snapshot='snap', read_only=True).AndRaise(test.TestingException) self.mox.ReplayAll() self.assertFalse(self.driver._is_cloneable(location)) def _copy_image(self): @contextlib.contextmanager def fake_temp_file(dir): class FakeTmp: def __init__(self, name): self.name = name yield FakeTmp('test') def fake_fetch_to_raw(ctx, image_service, image_id, path, size=None): pass self.stubs.Set(tempfile, 'NamedTemporaryFile', fake_temp_file) self.stubs.Set(os.path, 'exists', lambda x: True) self.stubs.Set(image_utils, 'fetch_to_raw', fake_fetch_to_raw) self.stubs.Set(self.driver, 'delete_volume', lambda x: None) self.stubs.Set(self.driver, '_resize', lambda x: None) self.driver.copy_image_to_volume(None, {'name': 'test', 'size': 1}, FakeImageService(), None) def test_copy_image_no_volume_tmp(self): self.configuration.volume_tmp_dir = None self._copy_image() def test_copy_image_volume_tmp(self): self.configuration.volume_tmp_dir = '/var/run/cinder/tmp' self._copy_image() def test_update_volume_stats(self): self.stubs.Set(self.driver.configuration, 'safe_get', lambda x: 'RBD') mock_client = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RADOSClient') driver.RADOSClient(self.driver).AndReturn(mock_client) 
mock_client.__enter__().AndReturn(mock_client) self.mox.StubOutWithMock(mock_client, 'cluster') mock_client.cluster.get_cluster_stats().AndReturn(dict( kb=1234567890, kb_used=4567890, kb_avail=1000000000, num_objects=4683)) mock_client.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb=1177, free_capacity_gb=953, reserved_percentage=0) actual = self.driver.get_volume_stats(True) self.assertDictMatch(expected, actual) def test_update_volume_stats_error(self): self.stubs.Set(self.driver.configuration, 'safe_get', lambda x: 'RBD') mock_client = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RADOSClient') driver.RADOSClient(self.driver).AndReturn(mock_client) mock_client.__enter__().AndReturn(mock_client) self.mox.StubOutWithMock(mock_client, 'cluster') self.stubs.Set(self.rados, 'Error', test.TestingException) mock_client.cluster.get_cluster_stats().AndRaise(test.TestingException) mock_client.__exit__(test.TestingException, mox.IgnoreArg(), mox.IgnoreArg()).AndReturn(None) self.mox.ReplayAll() expected = dict( volume_backend_name='RBD', vendor_name='Open Source', driver_version=self.driver.VERSION, storage_protocol='ceph', total_capacity_gb='unknown', free_capacity_gb='unknown', reserved_percentage=0) actual = self.driver.get_volume_stats(True) self.assertDictMatch(expected, actual) def test_get_mon_addrs(self): self.stubs.Set(self.driver, '_execute', lambda *a: (CEPH_MON_DUMP, '')) hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.assertEqual((hosts, ports), self.driver._get_mon_addrs()) def test_initialize_connection(self): name = 'volume-00000001' hosts = ['::1', '::1', '::1', '127.0.0.1', 'example.com'] ports = ['6789', '6790', '6791', '6792', '6791'] self.stubs.Set(self.driver, '_get_mon_addrs', lambda: (hosts, ports)) expected = { 'driver_volume_type': 'rbd', 'data': { 'name': '%s/%s' % (self.configuration.rbd_pool, name), 'hosts': hosts, 'ports': ports, 'auth_enabled': False, 'auth_username': None, 'secret_type': 'ceph', 'secret_uuid': None, } } actual = self.driver.initialize_connection(dict(name=name), None) self.assertDictMatch(expected, actual) def test_clone(self): name = u'volume-00000001' volume = dict(name=name) src_pool = u'images' src_image = u'image-name' src_snap = u'snapshot-name' mock_src_client = self.mox.CreateMockAnything() mock_dst_client = self.mox.CreateMockAnything() mock_rbd = self.mox.CreateMockAnything() self.mox.StubOutWithMock(driver, 'RADOSClient') driver.RADOSClient(self.driver, src_pool).AndReturn(mock_src_client) mock_src_client.__enter__().AndReturn(mock_src_client) driver.RADOSClient(self.driver).AndReturn(mock_dst_client) mock_dst_client.__enter__().AndReturn(mock_dst_client) self.rbd.RBD_FEATURE_LAYERING = 1 self.rbd.RBD().AndReturn(mock_rbd) mock_rbd.clone(mox.IgnoreArg(), str(src_image), str(src_snap), mox.IgnoreArg(), str(name), features=self.rbd.RBD_FEATURE_LAYERING) mock_dst_client.__exit__(None, None, None).AndReturn(None) mock_src_client.__exit__(None, None, None).AndReturn(None) self.mox.ReplayAll() self.driver._clone(volume, src_pool, src_image, src_snap) def test_extend_volume(self): fake_name = u'volume-00000001' fake_size = '20' fake_vol = {'project_id': 'testprjid', 'name': fake_name, 'size': fake_size, 'id': 'a720b3c0-d1f0-11e1-9b23-0800200c9a66'} self.mox.StubOutWithMock(self.driver, '_resize') size 
= int(fake_size) * units.GiB self.driver._resize(fake_vol, size=size) self.mox.ReplayAll() self.driver.extend_volume(fake_vol, fake_size) self.mox.VerifyAll() def test_rbd_volume_proxy_init(self): name = u'volume-00000001' snap = u'snapshot-name' self.stubs.Set(self.driver, '_connect_to_rados', lambda x: (None, None)) self.mox.StubOutWithMock(self.driver, '_disconnect_from_rados') # no snapshot self.rbd.Image(None, str(name), snapshot=None, read_only=False) \ .AndReturn(None) # snapshot self.rbd.Image(None, str(name), snapshot=str(snap), read_only=True) \ .AndReturn(None) # error causes disconnect self.stubs.Set(self.rbd, 'Error', test.TestingException) self.rbd.Image(None, str(name), snapshot=None, read_only=False) \ .AndRaise(test.TestingException) self.driver._disconnect_from_rados(None, None) self.mox.ReplayAll() driver.RBDVolumeProxy(self.driver, name) driver.RBDVolumeProxy(self.driver, name, snapshot=snap, read_only=True) self.assertRaises(test.TestingException, driver.RBDVolumeProxy, self.driver, name) def test_connect_to_rados(self): mock_client = self.mox.CreateMockAnything() mock_ioctx = self.mox.CreateMockAnything() self.stubs.Set(self.rados, 'Error', test.TestingException) # default configured pool self.rados.Rados(rados_id=None, conffile=None).AndReturn(mock_client) mock_client.connect() mock_client.open_ioctx('rbd').AndReturn(mock_ioctx) # different pool self.rados.Rados(rados_id=None, conffile=None).AndReturn(mock_client) mock_client.connect() mock_client.open_ioctx('images').AndReturn(mock_ioctx) # error self.rados.Rados(rados_id=None, conffile=None).AndReturn(mock_client) mock_client.connect() mock_client.open_ioctx('rbd').AndRaise(test.TestingException) mock_client.shutdown() self.mox.ReplayAll() self.assertEqual((mock_client, mock_ioctx), self.driver._connect_to_rados()) self.assertEqual((mock_client, mock_ioctx), self.driver._connect_to_rados('images')) self.assertRaises(test.TestingException, self.driver._connect_to_rados) class ManagedRBDTestCase(DriverTestCase): driver_name = "cinder.volume.drivers.rbd.RBDDriver" def setUp(self): super(ManagedRBDTestCase, self).setUp() fake_image.stub_out_image_service(self.stubs) self.volume.driver.set_initialized() def _clone_volume_from_image(self, expected_status, clone_works=True): """Try to clone a volume from an image, and check the status afterwards. 
""" def fake_clone_image(volume, image_location, image_id): return {'provider_location': None}, True def fake_clone_error(volume, image_location, image_id): raise exception.CinderException() self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) if clone_works: self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_image) else: self.stubs.Set(self.volume.driver, 'clone_image', fake_clone_error) image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' volume_id = 1 # creating volume testdata db.volume_create(self.context, {'id': volume_id, 'updated_at': timeutils.utcnow(), 'display_description': 'Test Desc', 'size': 20, 'status': 'creating', 'instance_uuid': None, 'host': 'dummy'}) try: if clone_works: self.volume.create_volume(self.context, volume_id, image_id=image_id) else: self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_id, image_id=image_id) volume = db.volume_get(self.context, volume_id) self.assertEqual(volume['status'], expected_status) finally: # cleanup db.volume_destroy(self.context, volume_id) def test_create_vol_from_image_status_available(self): """Verify that before cloning, an image is in the available state.""" self._clone_volume_from_image('available', True) def test_create_vol_from_image_status_error(self): """Verify that before cloning, an image is in the available state.""" self._clone_volume_from_image('error', False) def test_clone_image(self): # Test Failure Case(s) expected = ({}, False) self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: False) image_loc = (object(), object()) actual = self.volume.driver.clone_image(object(), image_loc, object()) self.assertEqual(expected, actual) self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) self.assertEqual(expected, self.volume.driver.clone_image(object(), None, None)) # Test Success Case(s) expected = ({'provider_location': None}, True) self.stubs.Set(self.volume.driver, '_parse_location', lambda x: ('a', 'b', 'c', 'd')) self.stubs.Set(self.volume.driver, '_clone', lambda *args: None) self.stubs.Set(self.volume.driver, '_resize', lambda *args: None) actual = self.volume.driver.clone_image(object(), image_loc, object()) self.assertEqual(expected, actual) def test_clone_success(self): self.stubs.Set(self.volume.driver, '_is_cloneable', lambda x: True) self.stubs.Set(self.volume.driver, 'clone_image', lambda a, b, c: True) image_id = 'c905cedb-7281-47e4-8a62-f26bc5fc4c77' self.assertTrue(self.volume.driver.clone_image({}, image_id, image_id))
"""Painless animated sprites which are pygame.sprite.Sprite objects. Animated sprites are loaded from GIF, but there will be other ways very soon! When using an AnimatedSprite you must remember to update it every main loop iteration with timedelta: >>> AnimatedSprite.from_gif('example.gif') # doctest: +SKIP >>> timedelta = clock.get_time() # doctest: +SKIP >>> animated_sprite.update(timedelta) # doctest: +SKIP A word on `rect` and `mask`: `pygame.Sprite` has the `rect` attribute and _optionally_ the `mask` attribute. AnimatedSprite gets those attributes from its frames, however there are some important notes regarding these two properties, AnimatedSprite.rect (always there) and AnimatedSprite.mask (optional)... AnimatedSprite.rect.size is updated with the current frame's size every time you use AnimatedSprite.update(). This means you can use AnimatedSprite.rect for position on screen or position on map, whatever, because AnimatedSprite.rect.topleft won't change unless _you_ change it! Likewise, AnimatedSprite.mask is set to the current frame's mask, if you told AnimatedSprite to generate a mask (it's optional). There are plans in the near future to allow for a single custom mask or ways to provide custom frame masks. When you load an animated sprite, you can specify a threshold: >>> AnimatedSprite.from_gif('example.gif', ... mask_threshold=254) # doctest: +SKIP Whereas, the threshold denotes the alpha transparency value for a pixel, which is opaque enough to be "set". What's supported: Right now there's only support for GIFs, but I'd like to expand that (especially since GIFs can't really take advantage of alpha transparency). """ import pygame from PIL import Image # NOTE: could be a sprite... class Frame(object): """A frame of an AnimatedSprite animation. Attributes: surface (pygame.Surface): The PyGame image which is used for a frame of an animation. mask (pygame.Mask): Mask automatically generated from the supplied surface (see above). Only exists if instantiated with mask_threshold >0. duration (integer): Milliseconds this frame lasts. How long this frame is displayed in corresponding animation. start_time (integer): The animation position in milleseconds, when this frame will start being displayed. stop_time (integer): The animation position in milleseconds, when this frame will stop being displayed. See Also: * AnimatedSprite.frames_from_gif() * AnimatedSprite.animation_position """ def __init__(self, surface, start_time, duration, mask_threshold=0): """Create a frame using a pygame surface, the start time, and the duration time. Args: surface (pygame.Surface): The surface/image for this frame. start_time (int): Millisecond this frame starts. This frame is a part of a larger series of frames and in order to render the animation properly we need to know when each frame begins to be drawn, while duration signifies when it ends. duration (integer): Milleseconds this frame lasts. See: start_time argument description. mask_threshold (int): Valid values 0-254. Alpha values ABOVE this provided number are marked as "solid"/ collidable/set. If this is not greater than zero, the mask is not generated. """ self.surface = surface self.duration = duration self.start_time = start_time self.end_time = start_time + duration if mask_threshold > 0: self.mask = pygame.mask.from_surface(surface, mask_threshold) def __repr__(self): s = "<Frame duration(%s) start_time(%s) end_time(%s)>" return s % (self.duration, self.start_time, self.end_time) # XXX: make sense to call it AniSprite? 
class AnimatedSprite(pygame.sprite.Sprite): """An animated sprite. The main thing! Treat like a pygame.sprite.Sprite! Attributes: total_duration (int): The total duration of of this animation in milliseconds. image (pygame.Surface): Current surface belonging to the active frame, both set by AnimatedSprite.update(). rect (pygame.Rect): When update() is called this rect size changes to match the current frame's. You'll want to checkout AnimatedSprite.update(). mask (pygame.Mask): If the first frame had a mask attribute, then we assume all do, and this (optional) attribute points to the active frame's mask attribute. frames (list[Frame]): -- active_frame_index (int): The current frame, by `frames` index. This is set by AnimatedSprite.update(). You'll want to see the `frames` attribute and of course AnimatedSprite.update(). active_frame: The current surface representing this animation at its current animation position. The AnimatedSprite.update() method sets this. animation_position (int): Animation position in milliseconds; milleseconds elapsed in this animation. This is used for determining which frame to select. Set once per tick through the AnimatedSprite.update() method. See Also: * :class:`pygame.sprite.Sprite` * :class:`Frame` """ def __init__(self, frames): """Create this AnimatedSprite using a list of Frame instances. Args: frames (list[Frame]): A properly assembled list of frames, which assumes that each Frame's start_time is greater than the previous element and is the previous element's start time + previous element/Frame's duration. Here is an example of aformentioned: >>> frame_one_surface = pygame.Surface((16, 16)) >>> frame_one = Frame(frame_one_surface, 0, 100) >>> frame_two_surface = pygame.Surface((16, 16)) >>> frame_two = Frame(frame_two_surface, 100, 50) Note: In the future I may add a method for verifying the validity of Frame start_times and durations. """ super(AnimatedSprite, self).__init__() self.frames = frames self.total_duration = self.get_total_duration(self.frames) self.active_frame_index = 0 self.active_frame = self.frames[self.active_frame_index] # animation position in milliseconds self.animation_position = 0 # this gets updated depending on the frame/time # needs to be a surface. self.image = self.frames[0].surface # represents the animated sprite's position # on screen. self.rect = self.image.get_rect() # making the bold assumption that if the # first frame has a mask, as do the rest. if hasattr(self.frames[0], 'mask'): self.mask = self.frames[0].mask def __getitem__(self, frame_index): """Return the frame corresponding to the supplied frame_index. Args: frame_index (int): Index number to lookup a frame by element number in the self.frames list. Returns: Frame: The frame of this animation at the specified index of frame_index. """ return self.frames[frame_index] def largest_frame_size(self): """Return the largest frame's (by area) dimensions as tuple(int x, int y). Returns: tuple (x, y): pixel dimensions of the largest frame surface in this AnimatedSprite. """ largest_frame_size = (0, 0) for frame in self.frames: largest_x, largest_y = largest_frame_size largest_area = largest_x * largest_y frame_size = frame.surface.get_size() frame_x, frame_y = frame_size frame_area = frame_x * frame_y if frame_area > largest_area: largest_frame_size = (frame_size) return largest_frame_size @classmethod def from_gif(cls, path_or_readable, mask_threshold=0): """The default is to create from gif bytes, but this can also be done from other methods... 
Create a list of surfaces (frames) and a list of their respective frame durations from an animated GIF. Args: path_or_readable (str|file-like-object): Either a string or an object with a read() method. So, either a path to an animated GIF, or a file-like-object/buffer of an animated GIF. mask_threshold (int): An optional keyword argument which must be >0 to generate masks automatically per frame. This value is used to note which parts are opaque and thus collidable, and which values are not. Think of RGBA, valid values are 0-254. See also: Frame(). Returns: AnimatedSprite: -- """ pil_gif = Image.open(path_or_readable) frame_index = 0 frames = [] time_position = 0 try: while True: duration = pil_gif.info['duration'] frame_sprite = cls.pil_image_to_pygame_surface(pil_gif) frame = Frame(surface=frame_sprite, start_time=time_position, duration=duration, mask_threshold=mask_threshold) frames.append(frame) frame_index += 1 time_position += duration pil_gif.seek(pil_gif.tell() + 1) except EOFError: pass # end of sequence return AnimatedSprite(frames) def update(self, timedelta): """Manipulate the state of this AnimatedSprite, namely the on-screen/viewport position (not absolute) and using the timedelta to do animation manipulations. Using the game's timedelta we decipher the animation position, which in turn allows us to locate the correct frame. Sets the image attribute to the current frame's image. Updates the rect attribute to the new relative position and frame size. Warning: Since we're changing the rect size on-the-fly, this can get the player stuck in certain boundaries. I will be remedying this in the future. Args: timedelta (int|float): Typically from the game clock (pygame.time.Clock) via clock.get_time(). Used to update the animation position. """ self.animation_position += timedelta if self.animation_position >= self.total_duration: self.animation_position = (self.animation_position % self.total_duration) self.active_frame_index = 0 while (self.animation_position > self.frames[self.active_frame_index].end_time): self.active_frame_index += 1 # NOTE: the fact that I'm using -1 here seems sloppy/hacky self.image = self.frames[self.active_frame_index - 1].surface self.rect.size = self.image.get_size() self.active_frame = self.frames[self.active_frame_index] # if we have a mask, let's update our pointer! # again, we make the bold assumption that if # our first frame had a mask, then the rest do. if hasattr(self, 'mask'): self.mask = self.active_frame.mask @staticmethod def get_total_duration(frames): """Return the total duration of the animation in milliseconds, milliseconds, from animation frame durations. Args: frames (List[AnimatedSpriteFrame]): -- Returns: int: The sum of all the frame's "duration" attribute. """ return sum([frame.duration for frame in frames]) @staticmethod def pil_image_to_pygame_surface(pil_image): """Convert PIL Image() to RGBA pygame Surface. Args: pil_image (Image): image to convert to pygame.Surface(). Returns: pygame.Surface: the converted image Example: >>> from PIL import Image >>> gif = Image.open('tests/resources/test-scene.gif') >>> AnimatedSprite.pil_image_to_pygame_surface(gif) <Surface(10x10x32 SW)> """ image_as_string = pil_image.convert('RGBA').tobytes() return pygame.image.fromstring(image_as_string, pil_image.size, 'RGBA')
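# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module above): it wires the
# from_gif()/update()/image/rect contract described in the module docstring
# into a standard pygame main loop. The GIF path 'example.gif' and the window
# size are made up for the example.
# ---------------------------------------------------------------------------

def _example_main_loop():
    pygame.init()
    screen = pygame.display.set_mode((320, 240))
    clock = pygame.time.Clock()

    # mask_threshold > 0 also generates a per-frame collision mask.
    sprite = AnimatedSprite.from_gif('example.gif', mask_threshold=127)
    sprite.rect.topleft = (50, 50)  # position is owned by the caller

    running = True
    while running:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False

        timedelta = clock.get_time()  # milliseconds since the previous tick
        sprite.update(timedelta)      # selects the active frame, resizes rect

        screen.fill((0, 0, 0))
        screen.blit(sprite.image, sprite.rect)
        pygame.display.flip()
        clock.tick(60)

    pygame.quit()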
# Copyright 2012 Nebula, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Exceptions raised by the Horizon code and the machinery for handling them. """ import logging import os import sys import six from django.core.management import color_style # noqa from django.http import HttpRequest # noqa from django.utils import encoding from django.utils.translation import ugettext_lazy as _ from django.views.debug import CLEANSED_SUBSTITUTE # noqa from django.views.debug import SafeExceptionReporterFilter # noqa from horizon.conf import HORIZON_CONFIG # noqa from horizon import messages LOG = logging.getLogger(__name__) class HorizonReporterFilter(SafeExceptionReporterFilter): """Error report filter that's always active, even in DEBUG mode.""" def is_active(self, request): return True class HorizonException(Exception): """Base exception class for distinguishing our own exception classes.""" pass class Http302(HorizonException): """Error class which can be raised from within a handler to cause an early bailout and redirect at the middleware level. """ status_code = 302 def __init__(self, location, message=None): self.location = location self.message = message class NotAuthorized(HorizonException): """Raised whenever a user attempts to access a resource which they do not have permission-based access to (such as when failing the :func:`~horizon.decorators.require_perms` decorator). The included :class:`~horizon.middleware.HorizonMiddleware` catches ``NotAuthorized`` and handles it gracefully by displaying an error message and redirecting the user to a login page. """ status_code = 401 class NotAuthenticated(HorizonException): """Raised when a user is trying to make requests and they are not logged in. The included :class:`~horizon.middleware.HorizonMiddleware` catches ``NotAuthenticated`` and handles it gracefully by displaying an error message and redirecting the user to a login page. """ status_code = 403 class NotFound(HorizonException): """Generic error to replace all "Not Found"-type API errors.""" status_code = 404 class Conflict(HorizonException): """Generic error to replace all "Conflict"-type API errors.""" status_code = 409 class BadRequest(HorizonException): """Generic error to replace all "BadRequest"-type API errors.""" status_code = 400 class RecoverableError(HorizonException): """Generic error to replace any "Recoverable"-type API errors.""" status_code = 100 # HTTP status code "Continue" class ServiceCatalogException(HorizonException): """Raised when a requested service is not available in the ``ServiceCatalog`` returned by Keystone. """ def __init__(self, service_name): message = 'Invalid service catalog service: %s' % service_name super(ServiceCatalogException, self).__init__(message) @six.python_2_unicode_compatible class AlreadyExists(HorizonException): """Exception to be raised when trying to create an API resource which already exists. 
""" def __init__(self, name, resource_type): self.attrs = {"name": name, "resource": resource_type} self.msg = _('A %(resource)s with the name "%(name)s" already exists.') def __repr__(self): return self.msg % self.attrs def __str__(self): return self.msg % self.attrs @six.python_2_unicode_compatible class GetFileError(HorizonException): """Exception to be raised when the value of get_file did not start with https:// or http:// """ def __init__(self, name, resource_type): self.attrs = {"name": name, "resource": resource_type} self.msg = _('The value of %(resource)s is %(name)s inside the ' 'template. When launching a stack from this interface,' ' the value must start with "http://" or "https://"') def __repr__(self): return '<%s name=%r resource_type=%r>' % (self.__class__.__name__, self.attrs['name'], self.attrs['resource_type']) def __str__(self): return self.msg % self.attrs class ConfigurationError(HorizonException): """Exception to be raised when invalid settings have been provided.""" pass class NotAvailable(HorizonException): """Exception to be raised when something is not available.""" pass class WorkflowError(HorizonException): """Exception to be raised when something goes wrong in a workflow.""" pass class WorkflowValidationError(HorizonException): """Exception raised during workflow validation if required data is missing, or existing data is not valid. """ pass class MessageFailure(HorizonException): """Exception raised during message notification.""" pass class HandledException(HorizonException): """Used internally to track exceptions that have gone through :func:`horizon.exceptions.handle` more than once. """ def __init__(self, wrapped): self.wrapped = wrapped UNAUTHORIZED = tuple(HORIZON_CONFIG['exceptions']['unauthorized']) UNAUTHORIZED += (NotAuthorized,) NOT_FOUND = tuple(HORIZON_CONFIG['exceptions']['not_found']) NOT_FOUND += (GetFileError,) RECOVERABLE = (AlreadyExists, Conflict, NotAvailable, ServiceCatalogException, BadRequest) RECOVERABLE += tuple(HORIZON_CONFIG['exceptions']['recoverable']) def error_color(msg): return color_style().ERROR_OUTPUT(msg) def check_message(keywords, message): """Checks an exception for given keywords and raises a new ``ActionError`` with the desired message if the keywords are found. This allows selective control over API error messages. """ exc_type, exc_value, exc_traceback = sys.exc_info() if set(str(exc_value).split(" ")).issuperset(set(keywords)): exc_value.message = message raise def handle_unauthorized(request, message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level): if ignore: return NotAuthorized if not force_silence and not handled: log_method(error_color("Unauthorized: %s" % log_entry)) if not handled: if message: message = _("Unauthorized: %s") % message # We get some pretty useless error messages back from # some clients, so let's define our own fallback. fallback = _("Unauthorized. Please try logging in again.") messages.error(request, message or fallback) # Escalation means logging the user out and raising NotAuthorized # so the middleware will redirect them appropriately. if escalate: # Prevents creation of circular import. 
django.contrib.auth # requires openstack_dashboard.settings to be loaded (by trying to # access settings.CACHES in django.core.caches) while # openstack_dashboard.settings requires django.contrib.auth to be # loaded while importing openstack_auth.utils from django.contrib.auth import logout # noqa logout(request) raise NotAuthorized # Otherwise continue and present our "unauthorized" error message. return NotAuthorized def handle_notfound(request, message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level): if not force_silence and not handled and (not ignore or force_log): log_method(error_color("Not Found: %s" % log_entry)) if not ignore and not handled: messages.error(request, message or log_entry) if redirect: raise Http302(redirect) if not escalate: return NotFound # return to normal code flow def handle_recoverable(request, message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level): if not force_silence and not handled and (not ignore or force_log): # Default recoverable error to WARN log level log_method = getattr(LOG, log_level or "warning") log_method(error_color("Recoverable error: %s" % log_entry)) if not ignore and not handled: messages.error(request, message or log_entry) if redirect: raise Http302(redirect) if not escalate: return RecoverableError # return to normal code flow HANDLE_EXC_METHODS = [ {'exc': UNAUTHORIZED, 'handler': handle_unauthorized, 'set_wrap': False, 'escalate': True}, {'exc': NOT_FOUND, 'handler': handle_notfound, 'set_wrap': True}, {'exc': RECOVERABLE, 'handler': handle_recoverable, 'set_wrap': True}, ] def handle(request, message=None, redirect=None, ignore=False, escalate=False, log_level=None, force_log=None): """Centralized error handling for Horizon. Because Horizon consumes so many different APIs with completely different ``Exception`` types, it's necessary to have a centralized place for handling exceptions which may be raised. Exceptions are roughly divided into 3 types: #. ``UNAUTHORIZED``: Errors resulting from authentication or authorization problems. These result in being logged out and sent to the login screen. #. ``NOT_FOUND``: Errors resulting from objects which could not be located via the API. These generally result in a user-facing error message, but are otherwise returned to the normal code flow. Optionally a redirect value may be passed to the error handler so users are returned to a different view than the one requested in addition to the error message. #. ``RECOVERABLE``: Generic API errors which generate a user-facing message but drop directly back to the regular code flow. All other exceptions bubble the stack as normal unless the ``ignore`` argument is passed in as ``True``, in which case only unrecognized errors are bubbled. If the exception is not re-raised, an appropriate wrapper exception class indicating the type of exception that was encountered will be returned. """ exc_type, exc_value, exc_traceback = sys.exc_info() log_method = getattr(LOG, log_level or "exception") force_log = force_log or os.environ.get("HORIZON_TEST_RUN", False) force_silence = getattr(exc_value, "silence_logging", False) # Because the same exception may travel through this method more than # once (if it's re-raised) we may want to treat it differently # the second time (e.g. no user messages/logging). 
handled = issubclass(exc_type, HandledException) wrap = False # Restore our original exception information, but re-wrap it at the end if handled: exc_type, exc_value, exc_traceback = exc_value.wrapped wrap = True log_entry = encoding.force_text(exc_value) user_message = "" # We trust messages from our own exceptions if issubclass(exc_type, HorizonException): user_message = log_entry # If the message has a placeholder for the exception, fill it in elif message and "%(exc)s" in message: user_message = encoding.force_text(message) % {"exc": log_entry} elif message: user_message = encoding.force_text(message) for exc_handler in HANDLE_EXC_METHODS: if issubclass(exc_type, exc_handler['exc']): if exc_handler['set_wrap']: wrap = True handler = exc_handler['handler'] ret = handler(request, user_message, redirect, ignore, exc_handler.get('escalate', escalate), handled, force_silence, force_log, log_method, log_entry, log_level) if ret: return ret # return to normal code flow # If we've gotten here, time to wrap and/or raise our exception. if wrap: raise HandledException([exc_type, exc_value, exc_traceback]) # assume exceptions handled in the code that pass in a message are already # handled appropriately and treat as recoverable if message: ret = handle_recoverable(request, user_message, redirect, ignore, escalate, handled, force_silence, force_log, log_method, log_entry, log_level) if ret: return ret six.reraise(exc_type, exc_value, exc_traceback)
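# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module): the view helper and the
# api.server_list() call are hypothetical; only exceptions.handle(), the
# "%(exc)s" message placeholder and the redirect behaviour come from the
# handle() contract documented above.
# ---------------------------------------------------------------------------

def _example_get_instances(request, api):
    """Fetch a list from an API, delegating failures to handle()."""
    from horizon import exceptions

    try:
        return api.server_list(request)
    except Exception:
        # UNAUTHORIZED errors log the user out; NOT_FOUND/RECOVERABLE errors
        # become user-facing messages plus an Http302 because a redirect is
        # given; anything unrecognised is re-raised.
        exceptions.handle(request,
                          _("Unable to retrieve instances: %(exc)s"),
                          redirect='/project/instances/')
        return []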
# -*- coding: utf-8 -*- # Copyright (C) 2012, Almar Klein # # Visvis is distributed under the terms of the (new) BSD License. # The full license can be found in 'license.txt'. import visvis as vv import numpy as np import OpenGL.GL as gl from visvis.processing.statistics import StatData # todo: enable notch in boxplot def boxplot(data1, data2=None, width=0.75, whiskers=1.5, axesAdjust=True, axes=None): """ boxplot(*args, width=0.75, whiskers=1.5, axesAdjust=True, axes=None) Create a box and whisker plot and returns a BoxPlot wobject that can be used to change the appearance of the boxes (such as color). If whiskers=='violin' creates a violin plot, which displays the kernel density estimate (kde) of each data. Usage ----- * boxplot(data, ...) creates boxplots for the given list of data * boxplot(X, data, ...) also supply x-coordinates for each data Arguments --------- X : iterable (optional) Specify x position of the boxes. data : list List of data, where each data is a sequence (something that can be passed to numpy.array()). width : scalar The width of the boxes. whiskers : scalar or string How to draw the whiskers. If a scalar is given, it defines the length of the whiskers as a function of the IQR. In this case any points lying beyond the whiskers are drawn as outliers. If 'minmax', the whiskers simply extend to the maximal data range. If 'std', the whiskers indicate the mean +/- the standard deviation. If 'violin', a violin plot is drawn, which shows the probability density function completely. axesAdjust : bool If True, this function will call axes.SetLimits(), and set the camera type to 3D. If daspectAuto has not been set yet, it is set to False. axes : Axes instance Display the bars in the given axes, or the current axes if not given. """ # Get axes if axes is None: axes = vv.gca() # Pre check if data2 is None: data_list = data1 else: data_list = data2 if not isinstance(data_list, (tuple,list)): raise ValueError('Data should be given as a list.') # if data2 is None: xx = range(len(data_list)) else: xx = [float(x) for x in data1] if len(data_list) != len(xx): raise ValueError('Positions do not match length of data.') # Create boxes and boxplot object boxes = [BoxPlotBox(d, width, whiskers) for d in data_list] bp = BoxPlot(axes, xx, boxes) # Adjust axes if axesAdjust: if axes.daspectAuto is None: axes.daspectAuto = True axes.cameraType = '2d' axes.SetLimits() # Done axes.Draw() return bp class BoxPlotBox(object): """ BoxPlotBox Represents a block in a boxplot. Used for storing information such as position of box and whiskers etc. """ def __init__(self, data, width, whiskers): # Get stats of data self._stats = StatData(data) # Init width self._width = width # Init whisker style self._whiskers = whiskers # Init line style self._lc = (0,0,0) self._lw = 1 # Calculate now self.calculate() def SetWidth(self, w): self._width = w self.calculate() def SetWhiskers(self, whiskers): # Lowercase and check if isinstance(whiskers, basestring): whiskers = whiskers.lower() if whiskers == 'default': whiskers = 1.5 elif whiskers not in ['minmax', 'std', 'violin']: raise ValueError('Invalid whiskers style') elif isinstance(whiskers, (float, int)): whiskers = float(whiskers) else: raise ValueError('Invalid whiskers style') # Set self._whiskers = whiskers self.calculate() def calculate(self): """ calculate() Calculate the stats, and storing them such that they can be drawn easily. """ # Init limts self._limits = vv.Range(self._stats.dmin, self._stats.dmax) # Calculate more? 
if isinstance(self._whiskers, float): self.calculate_outliers() elif self._whiskers == 'violin': self.calculate_violin() else: pass # we have all the info we need def calculate_outliers(self): # Get stats and data stats = self._stats data = stats._data # Set border whiskerWidth = 1.5 if isinstance(self._whiskers, float): whiskerWidth = self._whiskers # Get indices of points beyond whiskers w1 = stats.Q1 - stats.IQR * whiskerWidth w2 = stats.Q3 + stats.IQR * whiskerWidth I1, = np.where(data < w1) I2, = np.where(data > w2) # Get points for whiskers if I1.size: self._wmin = data[ I1[-1]+1 ] else: self._wmin = data[0] if I2.size: self._wmax = data[ I2[0]-1 ] else: self._wmax = data[-1] # Get outlier points Iall = np.concatenate([I1, I2]) self._outliers = data[ Iall ] def calculate_violin(self): # Get stats stats = self._stats # Get kernel density estimate nbins = stats.best_number_of_bins(8, 128) centers, values = stats.kde(nbins) # Normalize values values = values * (0.5 * self._width / values.max()) # Create array with locations n = values.size points = np.zeros((n*2+1,3), np.float32) points[:n,0] = values points[:n,1] = centers points[n:2*n,0] = -np.flipud(values) points[n:2*n,1] = np.flipud(centers) points[2*n,0] = values[0] points[2*n,1] = centers[0] # self._points = points # Update limits self._limits = vv.Range(centers[0], centers[-1]) def Draw(self, x_offset): # Prepare color and line width clr = self._lc gl.glColor3f(clr[0], clr[1], clr[2]) gl.glLineWidth(self._lw) # Set line smoothing if self._lw == int(self._lw): gl.glDisable(gl.GL_LINE_SMOOTH) else: gl.glEnable(gl.GL_LINE_SMOOTH) # Draw if self._whiskers == 'violin': self.DrawViolin(x_offset) else: self.DrawBox(x_offset) # Reset gl.glEnable(gl.GL_LINE_SMOOTH) def DrawBox(self, x_offset): # Get data stats = self._stats # Determine whisker position if isinstance(self._whiskers, float): wmin, wmax = self._wmin, self._wmax elif self._whiskers == 'minmax': wmin, wmax = stats.dmin, stats.dmax elif self._whiskers == 'std': wmin, wmax = stats.mean - stats.std, stats.mean + stats.std # Relative width w1 = self._width * 0.5 w2 = self._width * 0.125 # Draw box gl.glBegin(gl.GL_LINE_LOOP) gl.glVertex2f(x_offset-w1, stats.Q1) gl.glVertex2f(x_offset+w1, stats.Q1) gl.glVertex2f(x_offset+w1, stats.Q3) gl.glVertex2f(x_offset-w1, stats.Q3) gl.glEnd() # Draw mean, wisker lines, and wiskers gl.glBegin(gl.GL_LINES) gl.glVertex2f(x_offset-w1, stats.Q2) gl.glVertex2f(x_offset+w1, stats.Q2) # gl.glVertex2f(x_offset, stats.Q1) gl.glVertex2f(x_offset, wmin) gl.glVertex2f(x_offset, stats.Q3) gl.glVertex2f(x_offset, wmax) # gl.glVertex2f(x_offset-w2, wmin) gl.glVertex2f(x_offset+w2, wmin) gl.glVertex2f(x_offset-w2, wmax) gl.glVertex2f(x_offset+w2, wmax) gl.glEnd() # Draw outliers? 
if isinstance(self._whiskers, float): # Group outliers outliers = [] for p in self._outliers: if outliers and outliers[0][-1] == p: outliers[0].append(p) else: outliers.append([p]) # Draw outliers w3 = self._width * 0.125*0.5 gl.glPointSize(5) gl.glEnable(gl.GL_POINT_SMOOTH) gl.glBegin(gl.GL_POINTS) for outlierGroup in outliers: offset = - (len(outlierGroup)-1) * w3 * 0.5 offset = max(offset, -w1) for i in range(len(outlierGroup)): p = outlierGroup[i] gl.glVertex2f(x_offset+offset+w3*i, p) gl.glEnd() def DrawViolin(self, x_offset): # Get stats stats = self._stats # Smooth lines gl.glEnable(gl.GL_LINE_SMOOTH) # Translate points points = self._points.copy() points[:,0] += x_offset # Draw outer lines gl.glEnableClientState(gl.GL_VERTEX_ARRAY) gl.glVertexPointerf(points) gl.glDrawArrays(gl.GL_LINE_STRIP, 0, points.shape[0]) gl.glFlush() gl.glDisableClientState(gl.GL_VERTEX_ARRAY) # Draw mean p25, p50 and p75 w2 = self._width * 0.25 gl.glBegin(gl.GL_LINES) gl.glVertex2f(x_offset-w2, stats.Q2) gl.glVertex2f(x_offset+w2, stats.Q2) gl.glVertex2f(x_offset, stats.Q1) gl.glVertex2f(x_offset, stats.Q3) gl.glEnd() class BoxPlot(vv.Wobject): """ """ def __init__(self, parent, xx, boxes): vv.Wobject.__init__(self, parent) self._xx = xx self._boxes = boxes def OnDraw(self): # Draw all boxes for i in range(len(self._boxes)): self._boxes[i].Draw(self._xx[i]) def _GetLimits(self): # xlim and zlim are easy #x1, x2 = 0.5, len(self._boxes)-0.5 x1 = self._xx[0] - self._boxes[0]._width * 1.05 x2 = self._xx[-1] + self._boxes[-1]._width * 1.05 z1, z2 = 0, 0.2 # ylim is harder y1, y2 = 9999999999999, -99999999999999999 for box in self._boxes: y1 = min(y1, box._limits.min) y2 = max(y2, box._limits.max) if not self._boxes: y1, y2 = 0,1 # Done return vv.Wobject._GetLimits(self, x1, x2, y1, y2, z1, z2) @vv.misc.PropWithDraw def lc(): """ Get/Set the line color of the boxes. """ def fget(self): return self._boxes[0]._lc def fset(self, value): lc = vv.misc.getColor(value, 'setting line color') for box in self._boxes: box._lc = lc return locals() @vv.misc.PropWithDraw def lw(): """ Get/Set the line width of the boxes. """ def fget(self): return self._boxes[0]._lw def fset(self, value): for box in self._boxes: box._lw = float(value) return locals() @vv.misc.PropWithDraw def whiskers(): """ Get/Set the style of the whiskers. """ def fget(self): return self._boxes[0]._whiskers def fset(self, value): for box in self._boxes: box.SetWhiskers(value) return locals() if __name__ == '__main__': vv.figure(1); vv.clf() a = vv.gca() d1 = np.random.normal(1, 4, (1000,1000)) d2 = np.random.normal(2, 3, (20,)) d3 = np.random.uniform(-1, 3, (100,)) d4 = [1,2,1,2.0, 8, 2, 3, 1, 2, 2, 3, 2, 2.1, 8, 8, 8, 8, 8, 1.2, 1.3, 0, 0, 1.5, 2] b = boxplot((d1,d2,d3, d4), width=0.8, whiskers='violin') ## dd = d4 stat = StatData(dd) bins1, values1 = stat.histogram_np(normed=True) bins2, values2 = stat.histogram() bins3, values3 = stat.kde( ) vv.figure(2); vv.clf() vv.bar(bins2, values2)#, lc='r', ms='.', mc='r') vv.plot(bins3, values3)#, lc='g', ms='.', mc='g') vv.plot(bins1, values1, lc='b', ms='.', mc='b', ls=':', mw=4) #print abs(bins1-bins2).sum()
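# ---------------------------------------------------------------------------
# Illustrative sketch of the whisker rule described in the boxplot() docstring:
# with a scalar `whiskers` value w, the whiskers span Q1 - w*IQR .. Q3 + w*IQR
# and points outside that range are drawn as outliers. Standalone numpy, and
# independent of the StatData class used by BoxPlotBox above (quartiles here
# come from np.percentile, so exact values may differ slightly).
# ---------------------------------------------------------------------------

def _whisker_limits_and_outliers(data, whisker_width=1.5):
    data = np.sort(np.asarray(data, dtype=float))
    q1, q3 = np.percentile(data, [25, 75])
    iqr = q3 - q1
    low = q1 - whisker_width * iqr
    high = q3 + whisker_width * iqr
    inside = data[(data >= low) & (data <= high)]
    outliers = data[(data < low) | (data > high)]
    # The drawn whiskers extend to the most extreme points still inside the fences.
    return inside[0], inside[-1], outliers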
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generic TFX model validator executor."""

import os
from typing import Any, Dict, List

import absl
import apache_beam as beam
import tensorflow_model_analysis as tfma

from tfx import types
from tfx.components.model_validator import constants
from tfx.dsl.components.base import base_beam_executor
from tfx.dsl.io import fileio
from tfx.types import artifact_utils
from tfx.utils import io_utils
from tfx.utils import path_utils


class Executor(base_beam_executor.BaseBeamExecutor):
  """DEPRECATED: Please use `Evaluator` instead.

  The model validator helps prevent bad models from being pushed to
  production. It does this by validating exported models against known good
  models (e.g. the current production model), and marking the exported model
  as good ("blessing it") only if the exported model's metrics are within
  predefined thresholds around the good model's metrics.

  The model validator will validate tf.serving format exported models produced
  by the Trainer component. The validator evaluates the models on examples
  created by the ExampleGen component.

  The validator will also automatically read data written by the Pusher
  component regarding the latest pushed models by using ml.metadata to query
  the previously pushed artifacts.

  To include ModelValidator in a TFX pipeline, configure your pipeline similar
  to
  https://github.com/tensorflow/tfx/blob/master/tfx/examples/chicago_taxi_pipeline/taxi_pipeline_simple.py#L110.
  """

  # TODO(jyzhao): customized threshold support.
  def _pass_threshold(self, eval_result: tfma.EvalResult) -> bool:
    """Check threshold."""
    return True

  # TODO(jyzhao): customized validation support.
  def _compare_eval_result(self, current_model_eval_result: tfma.EvalResult,
                           blessed_model_eval_result: tfma.EvalResult) -> bool:
    """Compare accuracy of all metrics and return true if current is better or equal."""
    for current_metric, blessed_metric in zip(
        current_model_eval_result.slicing_metrics,
        blessed_model_eval_result.slicing_metrics):
      # slicing_metric is a tuple, index 0 is slice, index 1 is its value.
      if current_metric[0] != blessed_metric[0]:
        raise RuntimeError('EvalResult not match {} vs {}.'.format(
            current_metric[0], blessed_metric[0]))
      # TODO(b/140455644): TFMA introduced breaking change post 0.14 release.
      # Remove this forward compatibility change after 0.15 release.
      current_model_metrics = current_metric[1]
      blessed_model_metrics = blessed_metric[1]
      try:
        current_model_accuracy = current_model_metrics['accuracy']
        blessed_model_accuracy = blessed_model_metrics['accuracy']
      except KeyError:
        current_model_accuracy = current_model_metrics['']['']['accuracy']
        blessed_model_accuracy = blessed_model_metrics['']['']['accuracy']

      if (current_model_accuracy['doubleValue'] <
          blessed_model_accuracy['doubleValue']):
        absl.logging.info(
            'Current model accuracy is worse than blessed model: {}'.format(
                current_metric[0]))
        return False

    return True

  def _generate_blessing_result(self, eval_examples_uri: str,
                                slice_spec: List[tfma.slicer.SingleSliceSpec],
                                current_model_dir: str,
                                blessed_model_dir: str) -> bool:
    current_model_eval_result_path = os.path.join(
        self._temp_path, constants.CURRENT_MODEL_EVAL_RESULT_PATH)
    blessed_model_eval_result_path = os.path.join(
        self._temp_path, constants.BLESSED_MODEL_EVAL_RESULT_PATH)

    with self._make_beam_pipeline() as pipeline:
      eval_data = (
          pipeline
          | 'ReadData' >> beam.io.ReadFromTFRecord(
              file_pattern=io_utils.all_files_pattern(eval_examples_uri)))

      current_model = tfma.default_eval_shared_model(
          eval_saved_model_path=path_utils.eval_model_path(current_model_dir))
      (eval_data
       | 'EvalCurrentModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
           eval_shared_model=current_model,
           slice_spec=slice_spec,
           output_path=current_model_eval_result_path))

      if blessed_model_dir is not None:
        blessed_model = tfma.default_eval_shared_model(
            eval_saved_model_path=path_utils.eval_model_path(
                blessed_model_dir))
        (eval_data
         | 'EvalBlessedModel' >> tfma.ExtractEvaluateAndWriteResults(  # pylint: disable=expression-not-assigned
             eval_shared_model=blessed_model,
             slice_spec=slice_spec,
             output_path=blessed_model_eval_result_path))

    absl.logging.info('all files in current_model_eval_result_path: [%s]',
                      str(fileio.listdir(current_model_eval_result_path)))
    current_model_eval_result = tfma.load_eval_result(
        output_path=current_model_eval_result_path)

    if not self._pass_threshold(current_model_eval_result):
      absl.logging.info('Current model does not pass threshold.')
      return False
    absl.logging.info('Current model passes threshold.')

    if blessed_model_dir is None:
      absl.logging.info('No blessed model yet.')
      return True

    absl.logging.info('all files in blessed_model_eval_result: [%s]',
                      str(fileio.listdir(blessed_model_eval_result_path)))
    blessed_model_eval_result = tfma.load_eval_result(
        output_path=blessed_model_eval_result_path)

    if (self._compare_eval_result(current_model_eval_result,
                                  blessed_model_eval_result)):
      absl.logging.info('Current model better than blessed model.')
      return True
    else:
      absl.logging.info('Current model worse than blessed model.')
      return False

  def Do(self, input_dict: Dict[str, List[types.Artifact]],
         output_dict: Dict[str, List[types.Artifact]],
         exec_properties: Dict[str, Any]) -> None:
    """Validate current model against last blessed model.

    Args:
      input_dict: Input dict from input key to a list of Artifacts.
        - examples: examples for eval the model.
        - model: current model for validation.
      output_dict: Output dict from output key to a list of Artifacts.
        - blessing: model blessing result.
      exec_properties: A dict of execution properties.
        - blessed_model: last blessed model for validation.
        - blessed_model_id: last blessed model id.

    Returns:
      None
    """
    self._log_startup(input_dict, output_dict, exec_properties)

    self._temp_path = self._get_tmp_dir()
    absl.logging.info(
        'Using temp path {} for tft.beam'.format(self._temp_path))

    eval_examples_uri = artifact_utils.get_split_uri(
        input_dict[constants.EXAMPLES_KEY], 'eval')
    blessing = artifact_utils.get_single_instance(
        output_dict[constants.BLESSING_KEY])

    # Current model to be validated.
    current_model = artifact_utils.get_single_instance(
        input_dict[constants.MODEL_KEY])
    absl.logging.info('Using {} as current model.'.format(current_model.uri))
    blessing.set_string_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_URI_KEY, current_model.uri)
    blessing.set_int_custom_property(
        constants.ARTIFACT_PROPERTY_CURRENT_MODEL_ID_KEY, current_model.id)

    # Denote model component_name.
    component_id = exec_properties['current_component_id']
    blessing.set_string_custom_property('component_id', component_id)

    # Previous blessed model to be validated against.
    blessed_model_dir = exec_properties['blessed_model']
    blessed_model_id = exec_properties['blessed_model_id']
    absl.logging.info('Using {} as blessed model.'.format(blessed_model_dir))
    if blessed_model_dir:
      blessing.set_string_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_URI_KEY,
          blessed_model_dir)
      blessing.set_int_custom_property(
          constants.ARTIFACT_PROPERTY_BLESSED_MODEL_ID_KEY, blessed_model_id)

    absl.logging.info('Validating model.')
    # TODO(b/125853306): support customized slice spec.
    blessed = self._generate_blessing_result(
        eval_examples_uri=eval_examples_uri,
        slice_spec=[tfma.slicer.SingleSliceSpec()],
        current_model_dir=current_model.uri,
        blessed_model_dir=blessed_model_dir)

    if blessed:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.BLESSED_VALUE)
    else:
      io_utils.write_string_file(
          os.path.join(blessing.uri, constants.NOT_BLESSED_FILE_NAME), '')
      blessing.set_int_custom_property(constants.ARTIFACT_PROPERTY_BLESSED_KEY,
                                       constants.NOT_BLESSED_VALUE)
    absl.logging.info('Blessing result {} written to {}.'.format(
        blessed, blessing.uri))

    io_utils.delete_dir(self._temp_path)
    absl.logging.info('Cleaned up temp path {} on executor success.'.format(
        self._temp_path))
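# ---------------------------------------------------------------------------
# Illustrative pipeline wiring sketch (paraphrased from the class docstring
# above): `example_gen` and `trainer` are hypothetical upstream components,
# and the import path reflects TFX releases that still ship this deprecated
# component.
# ---------------------------------------------------------------------------

def _example_model_validator_wiring(example_gen, trainer):
  from tfx.components import ModelValidator

  # The executor above then evaluates trainer's model on example_gen's eval
  # split and blesses it only if it is at least as accurate as the previously
  # blessed model.
  return ModelValidator(
      examples=example_gen.outputs['examples'],
      model=trainer.outputs['model'])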
#! /usr/bin/env python # # (C) 2001-2015 Chris Liechti <cliechti@gmx.net> # # SPDX-License-Identifier: BSD-3-Clause """\ Multi-port serial<->TCP/IP forwarder. - RFC 2217 - check existence of serial port periodically - start/stop forwarders - each forwarder creates a server socket and opens the serial port - serial ports are opened only once. network connect/disconnect does not influence serial port - only one client per connection """ import os import select import socket import sys import time import traceback import serial import serial.rfc2217 import serial.tools.list_ports import dbus # Try to import the avahi service definitions properly. If the avahi module is # not available, fall back to a hard-coded solution that hopefully still works. try: import avahi except ImportError: class avahi: DBUS_NAME = "org.freedesktop.Avahi" DBUS_PATH_SERVER = "/" DBUS_INTERFACE_SERVER = "org.freedesktop.Avahi.Server" DBUS_INTERFACE_ENTRY_GROUP = DBUS_NAME + ".EntryGroup" IF_UNSPEC = -1 PROTO_UNSPEC, PROTO_INET, PROTO_INET6 = -1, 0, 1 class ZeroconfService: """\ A simple class to publish a network service with zeroconf using avahi. """ def __init__(self, name, port, stype="_http._tcp", domain="", host="", text=""): self.name = name self.stype = stype self.domain = domain self.host = host self.port = port self.text = text self.group = None def publish(self): bus = dbus.SystemBus() server = dbus.Interface( bus.get_object( avahi.DBUS_NAME, avahi.DBUS_PATH_SERVER ), avahi.DBUS_INTERFACE_SERVER ) g = dbus.Interface( bus.get_object( avahi.DBUS_NAME, server.EntryGroupNew() ), avahi.DBUS_INTERFACE_ENTRY_GROUP ) g.AddService(avahi.IF_UNSPEC, avahi.PROTO_UNSPEC, dbus.UInt32(0), self.name, self.stype, self.domain, self.host, dbus.UInt16(self.port), self.text) g.Commit() self.group = g def unpublish(self): if self.group is not None: self.group.Reset() self.group = None def __str__(self): return "%r @ %s:%s (%s)" % (self.name, self.host, self.port, self.stype) class Forwarder(ZeroconfService): """\ Single port serial<->TCP/IP forarder that depends on an external select loop. - Buffers for serial -> network and network -> serial - RFC 2217 state - Zeroconf publish/unpublish on open/close. """ def __init__(self, device, name, network_port, on_close=None, log=None): ZeroconfService.__init__(self, name, network_port, stype='_serial_port._tcp') self.alive = False self.network_port = network_port self.on_close = on_close self.log = log self.device = device self.serial = serial.Serial() self.serial.port = device self.serial.baudrate = 115200 self.serial.timeout = 0 self.socket = None self.server_socket = None self.rfc2217 = None # instantiate later, when connecting def __del__(self): try: if self.alive: self.close() except: pass # XXX errors on shutdown def open(self): """open serial port, start network server and publish service""" self.buffer_net2ser = bytearray() self.buffer_ser2net = bytearray() # open serial port try: self.serial.rts = False self.serial.open() except Exception as msg: self.handle_serial_error(msg) self.serial_settings_backup = self.serial.get_settings() # start the socket server # XXX add IPv6 support: use getaddrinfo for socket options, bind to multiple sockets? 
# info_list = socket.getaddrinfo(None, port, 0, socket.SOCK_STREAM, 0, socket.AI_PASSIVE) self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.server_socket.setsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR, self.server_socket.getsockopt( socket.SOL_SOCKET, socket.SO_REUSEADDR ) | 1 ) self.server_socket.setblocking(0) try: self.server_socket.bind(('', self.network_port)) self.server_socket.listen(1) except socket.error as msg: self.handle_server_error() #~ raise if self.log is not None: self.log.info("%s: Waiting for connection on %s..." % (self.device, self.network_port)) # zeroconfig self.publish() # now we are ready self.alive = True def close(self): """Close all resources and unpublish service""" if self.log is not None: self.log.info("%s: closing..." % (self.device, )) self.alive = False self.unpublish() if self.server_socket: self.server_socket.close() if self.socket: self.handle_disconnect() self.serial.close() if self.on_close is not None: # ensure it is only called once callback = self.on_close self.on_close = None callback(self) def write(self, data): """the write method is used by serial.rfc2217.PortManager. it has to write to the network.""" self.buffer_ser2net += data def update_select_maps(self, read_map, write_map, error_map): """Update dictionaries for select call. insert fd->callback mapping""" if self.alive: # always handle serial port reads read_map[self.serial] = self.handle_serial_read error_map[self.serial] = self.handle_serial_error # handle serial port writes if buffer is not empty if self.buffer_net2ser: write_map[self.serial] = self.handle_serial_write # handle network if self.socket is not None: # handle socket if connected # only read from network if the internal buffer is not # already filled. the TCP flow control will hold back data if len(self.buffer_net2ser) < 2048: read_map[self.socket] = self.handle_socket_read # only check for write readiness when there is data if self.buffer_ser2net: write_map[self.socket] = self.handle_socket_write error_map[self.socket] = self.handle_socket_error else: # no connection, ensure clear buffer self.buffer_ser2net = bytearray() # check the server socket read_map[self.server_socket] = self.handle_connect error_map[self.server_socket] = self.handle_server_error def handle_serial_read(self): """Reading from serial port""" try: data = os.read(self.serial.fileno(), 1024) if data: # store data in buffer if there is a client connected if self.socket is not None: # escape outgoing data when needed (Telnet IAC (0xff) character) if self.rfc2217: data = serial.to_bytes(self.rfc2217.escape(data)) self.buffer_ser2net += data else: self.handle_serial_error() except Exception as msg: self.handle_serial_error(msg) def handle_serial_write(self): """Writing to serial port""" try: # write a chunk n = os.write(self.serial.fileno(), bytes(self.buffer_net2ser)) # and see how large that chunk was, remove that from buffer self.buffer_net2ser = self.buffer_net2ser[n:] except Exception as msg: self.handle_serial_error(msg) def handle_serial_error(self, error=None): """Serial port error""" # terminate connection self.close() def handle_socket_read(self): """Read from socket""" try: # read a chunk from the serial port data = self.socket.recv(1024) if data: # Process RFC 2217 stuff when enabled if self.rfc2217: data = serial.to_bytes(self.rfc2217.filter(data)) # add data to buffer self.buffer_net2ser += data else: # empty read indicates disconnection self.handle_disconnect() except socket.error: self.handle_socket_error() def 
handle_socket_write(self): """Write to socket""" try: # write a chunk count = self.socket.send(bytes(self.buffer_ser2net)) # and remove the sent data from the buffer self.buffer_ser2net = self.buffer_ser2net[count:] except socket.error: self.handle_socket_error() def handle_socket_error(self): """Socket connection fails""" self.handle_disconnect() def handle_connect(self): """Server socket gets a connection""" # accept a connection in any case, close connection # below if already busy connection, addr = self.server_socket.accept() if self.socket is None: self.socket = connection # More quickly detect bad clients who quit without closing the # connection: After 1 second of idle, start sending TCP keep-alive # packets every 1 second. If 3 consecutive keep-alive packets # fail, assume the client is gone and close the connection. self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1) self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1) self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1) self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3) self.socket.setblocking(0) self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) if self.log is not None: self.log.warning('%s: Connected by %s:%s' % (self.device, addr[0], addr[1])) self.serial.rts = True self.serial.dtr = True if self.log is not None: self.rfc2217 = serial.rfc2217.PortManager(self.serial, self, logger=log.getChild(self.device)) else: self.rfc2217 = serial.rfc2217.PortManager(self.serial, self) else: # reject connection if there is already one connection.close() if self.log is not None: self.log.warning('%s: Rejecting connect from %s:%s' % (self.device, addr[0], addr[1])) def handle_server_error(self): """Socket server fails""" self.close() def handle_disconnect(self): """Socket gets disconnected""" # signal disconnected terminal with control lines try: self.serial.rts = False self.serial.dtr = False finally: # restore original port configuration in case it was changed self.serial.apply_settings(self.serial_settings_backup) # stop RFC 2217 state machine self.rfc2217 = None # clear send buffer self.buffer_ser2net = bytearray() # close network connection if self.socket is not None: self.socket.close() self.socket = None if self.log is not None: self.log.warning('%s: Disconnected' % self.device) def test(): service = ZeroconfService(name="TestService", port=3000) service.publish() raw_input("Press any key to unpublish the service ") service.unpublish() if __name__ == '__main__': import logging import argparse VERBOSTIY = [ logging.ERROR, # 0 logging.WARNING, # 1 (default) logging.INFO, # 2 logging.DEBUG, # 3 ] parser = argparse.ArgumentParser(usage="""\ %(prog)s [options] Announce the existence of devices using zeroconf and provide a TCP/IP <-> serial port gateway (implements RFC 2217). If running as daemon, write to syslog. Otherwise write to stdout. """, epilog="""\ NOTE: no security measures are implemented. Anyone can remotely connect to this service over the network. Only one connection at once, per port, is supported. When the connection is terminated, it waits for the next connect. 
""") group = parser.add_argument_group("serial port settings") group.add_argument( "--ports-regex", help="specify a regex to search against the serial devices and their descriptions (default: %(default)s)", default='/dev/ttyUSB[0-9]+', metavar="REGEX") group = parser.add_argument_group("network settings") group.add_argument( "--tcp-port", dest="base_port", help="specify lowest TCP port number (default: %(default)s)", default=7000, type=int, metavar="PORT") group = parser.add_argument_group("daemon") group.add_argument( "-d", "--daemon", dest="daemonize", action="store_true", help="start as daemon", default=False) group.add_argument( "--pidfile", help="specify a name for the PID file", default=None, metavar="FILE") group = parser.add_argument_group("diagnostics") group.add_argument( "-o", "--logfile", help="write messages file instead of stdout", default=None, metavar="FILE") group.add_argument( "-q", "--quiet", dest="verbosity", action="store_const", const=0, help="suppress most diagnostic messages", default=1) group.add_argument( "-v", "--verbose", dest="verbosity", action="count", help="increase diagnostic messages") args = parser.parse_args() # set up logging logging.basicConfig(level=VERBOSTIY[min(args.verbosity, len(VERBOSTIY) - 1)]) log = logging.getLogger('port_publisher') # redirect output if specified if args.logfile is not None: class WriteFlushed: def __init__(self, fileobj): self.fileobj = fileobj def write(self, s): self.fileobj.write(s) self.fileobj.flush() def close(self): self.fileobj.close() sys.stdout = sys.stderr = WriteFlushed(open(args.logfile, 'a')) # atexit.register(lambda: sys.stdout.close()) if args.daemonize: # if running as daemon is requested, do the fork magic # args.quiet = True # do the UNIX double-fork magic, see Stevens' "Advanced # Programming in the UNIX Environment" for details (ISBN 0201563177) try: pid = os.fork() if pid > 0: # exit first parent sys.exit(0) except OSError as e: log.critical("fork #1 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) # decouple from parent environment os.chdir("/") # don't prevent unmounting.... 
os.setsid() os.umask(0) # do second fork try: pid = os.fork() if pid > 0: # exit from second parent, save eventual PID before if args.pidfile is not None: open(args.pidfile, 'w').write("%d" % pid) sys.exit(0) except OSError as e: log.critical("fork #2 failed: %d (%s)\n" % (e.errno, e.strerror)) sys.exit(1) if args.logfile is None: import syslog syslog.openlog("serial port publisher") # redirect output to syslog class WriteToSysLog: def __init__(self): self.buffer = '' def write(self, s): self.buffer += s if '\n' in self.buffer: output, self.buffer = self.buffer.split('\n', 1) syslog.syslog(output) def flush(self): syslog.syslog(self.buffer) self.buffer = '' def close(self): self.flush() sys.stdout = sys.stderr = WriteToSysLog() # ensure the that the daemon runs a normal user, if run as root #if os.getuid() == 0: # name, passwd, uid, gid, desc, home, shell = pwd.getpwnam('someuser') # os.setgid(gid) # set group first # os.setuid(uid) # set user # keep the published stuff in a dictionary published = {} # get a nice hostname hostname = socket.gethostname() def unpublish(forwarder): """when forwarders die, we need to unregister them""" try: del published[forwarder.device] except KeyError: pass else: log.info("unpublish: %s" % (forwarder)) alive = True next_check = 0 # main loop while alive: try: # if it is time, check for serial port devices now = time.time() if now > next_check: next_check = now + 5 connected = [d for d, p, i in serial.tools.list_ports.grep(args.ports_regex)] # Handle devices that are published, but no longer connected for device in set(published).difference(connected): log.info("unpublish: %s" % (published[device])) unpublish(published[device]) # Handle devices that are connected but not yet published for device in sorted(set(connected).difference(published)): # Find the first available port, starting from specified number port = args.base_port ports_in_use = [f.network_port for f in published.values()] while port in ports_in_use: port += 1 published[device] = Forwarder( device, "%s on %s" % (device, hostname), port, on_close=unpublish, log=log) log.warning("publish: %s" % (published[device])) published[device].open() # select_start = time.time() read_map = {} write_map = {} error_map = {} for publisher in published.values(): publisher.update_select_maps(read_map, write_map, error_map) readers, writers, errors = select.select( read_map.keys(), write_map.keys(), error_map.keys(), 5) # select_end = time.time() # print "select used %.3f s" % (select_end - select_start) for reader in readers: read_map[reader]() for writer in writers: write_map[writer]() for error in errors: error_map[error]() # print "operation used %.3f s" % (time.time() - select_end) except KeyboardInterrupt: alive = False sys.stdout.write('\n') except SystemExit: raise except: #~ raise traceback.print_exc()
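# Usage sketch (added for illustration; not part of the original script and
# never called): assuming a Forwarder above is publishing a device on TCP
# port 7000 of localhost, any RFC 2217 aware client can open the remote port
# much like a local one. pyserial ships such a client as the "rfc2217://"
# URL handler, so the whole round trip can be exercised with a few lines.
# The module-level "serial" import of this script is reused here.
def _example_rfc2217_client(url='rfc2217://localhost:7000'):
    """Open a forwarded port via RFC 2217, send a probe and return the reply."""
    remote = serial.serial_for_url(url, baudrate=115200, timeout=1)
    try:
        remote.write(b'hello\n')      # data is relayed to the physical port
        return remote.read(64)        # read back whatever the device answers
    finally:
        remote.close()                # frees the single connection slot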
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-metric/state. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: State parameters relating to the ERO Metric Sub-TLV of the SID/Label binding TLV """ __slots__ = ("_path_helper", "_extmethods", "__metric") _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__metric = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "opaque-lsa", "extended-prefix", "tlvs", "tlv", "sid-label-binding", "tlvs", "tlv", "ero-metric", "state", ] def _get_metric(self): """ Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_metric/state/metric (uint32) YANG Description: The metric representing the aggregate IGP or TE path cost for the binding included within the SID/Label Binding TLV """ return self.__metric def _set_metric(self, v, load=False): """ Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_metric/state/metric (uint32) If this variable is read-only (config: 
false) in the source YANG file, then _set_metric is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_metric() directly. YANG Description: The metric representing the aggregate IGP or TE path cost for the binding included within the SID/Label Binding TLV """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """metric must be of a type compatible with uint32""", "defined-type": "uint32", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""", } ) self.__metric = t if hasattr(self, "_set"): self._set() def _unset_metric(self): self.__metric = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) metric = __builtin__.property(_get_metric) _pyangbind_elements = OrderedDict([("metric", metric)]) class state(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-network-instance-l2 - based on the path /network-instances/network-instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa-types/lsa-type/lsas/lsa/opaque-lsa/extended-prefix/tlvs/tlv/sid-label-binding/tlvs/tlv/ero-metric/state. Each member element of the container is represented as a class variable - with a specific YANG type. 
YANG Description: State parameters relating to the ERO Metric Sub-TLV of the SID/Label binding TLV """ __slots__ = ("_path_helper", "_extmethods", "__metric") _yang_name = "state" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__metric = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return [ "network-instances", "network-instance", "protocols", "protocol", "ospfv2", "areas", "area", "lsdb", "lsa-types", "lsa-type", "lsas", "lsa", "opaque-lsa", "extended-prefix", "tlvs", "tlv", "sid-label-binding", "tlvs", "tlv", "ero-metric", "state", ] def _get_metric(self): """ Getter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_metric/state/metric (uint32) YANG Description: The metric representing the aggregate IGP or TE path cost for the binding included within the SID/Label Binding TLV """ return self.__metric def _set_metric(self, v, load=False): """ Setter method for metric, mapped from YANG variable /network_instances/network_instance/protocols/protocol/ospfv2/areas/area/lsdb/lsa_types/lsa_type/lsas/lsa/opaque_lsa/extended_prefix/tlvs/tlv/sid_label_binding/tlvs/tlv/ero_metric/state/metric (uint32) If this variable is read-only (config: false) in the source YANG file, then _set_metric is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_metric() directly. 
YANG Description: The metric representing the aggregate IGP or TE path cost for the binding included within the SID/Label Binding TLV """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """metric must be of a type compatible with uint32""", "defined-type": "uint32", "generated-type": """YANGDynClass(base=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='http://openconfig.net/yang/network-instance', defining_module='openconfig-network-instance', yang_type='uint32', is_config=False)""", } ) self.__metric = t if hasattr(self, "_set"): self._set() def _unset_metric(self): self.__metric = YANGDynClass( base=RestrictedClassType( base_type=long, restriction_dict={"range": ["0..4294967295"]}, int_size=32, ), is_leaf=True, yang_name="metric", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace="http://openconfig.net/yang/network-instance", defining_module="openconfig-network-instance", yang_type="uint32", is_config=False, ) metric = __builtin__.property(_get_metric) _pyangbind_elements = OrderedDict([("metric", metric)])
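# Usage sketch (hand-written illustration, not pyangbind output): the
# "metric" leaf above is config false, so ordinary consumers read it through
# the read-only property while a backend filling in operational state calls
# _set_metric() directly, as the setter docstring describes.
def _example_metric_usage():
    """Populate and read the uint32 metric leaf of the generated container."""
    s = state()               # empty ero-metric/state container
    s._set_metric(20)         # backend-side population of the leaf
    value = int(s.metric)     # consumers read via the property (value == 20)
    s._unset_metric()         # drop back to the default/empty value
    return value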
# -*- coding: utf-8 -*- from __future__ import print_function from datetime import datetime, timedelta import numpy as np from pandas.compat import lrange from pandas import (DataFrame, Series, Index, MultiIndex, RangeIndex) import pandas as pd from pandas.util.testing import (assert_series_equal, assert_frame_equal, assertRaisesRegexp) import pandas.util.testing as tm from pandas.tests.frame.common import TestData class TestDataFrameAlterAxes(tm.TestCase, TestData): _multiprocess_can_split_ = True def test_set_index(self): idx = Index(np.arange(len(self.mixed_frame))) # cache it _ = self.mixed_frame['foo'] # noqa self.mixed_frame.index = idx self.assertIs(self.mixed_frame['foo'].index, idx) with assertRaisesRegexp(ValueError, 'Length mismatch'): self.mixed_frame.index = idx[::2] def test_set_index_cast(self): # issue casting an index then set_index df = DataFrame({'A': [1.1, 2.2, 3.3], 'B': [5.0, 6.1, 7.2]}, index=[2010, 2011, 2012]) expected = df.ix[2010] new_index = df.index.astype(np.int32) df.index = new_index result = df.ix[2010] assert_series_equal(result, expected) def test_set_index2(self): df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 'three', 'one', 'two'], 'C': ['a', 'b', 'c', 'd', 'e'], 'D': np.random.randn(5), 'E': np.random.randn(5)}) # new object, single-column result = df.set_index('C') result_nodrop = df.set_index('C', drop=False) index = Index(df['C'], name='C') expected = df.ix[:, ['A', 'B', 'D', 'E']] expected.index = index expected_nodrop = df.copy() expected_nodrop.index = index assert_frame_equal(result, expected) assert_frame_equal(result_nodrop, expected_nodrop) self.assertEqual(result.index.name, index.name) # inplace, single df2 = df.copy() df2.set_index('C', inplace=True) assert_frame_equal(df2, expected) df3 = df.copy() df3.set_index('C', drop=False, inplace=True) assert_frame_equal(df3, expected_nodrop) # create new object, multi-column result = df.set_index(['A', 'B']) result_nodrop = df.set_index(['A', 'B'], drop=False) index = MultiIndex.from_arrays([df['A'], df['B']], names=['A', 'B']) expected = df.ix[:, ['C', 'D', 'E']] expected.index = index expected_nodrop = df.copy() expected_nodrop.index = index assert_frame_equal(result, expected) assert_frame_equal(result_nodrop, expected_nodrop) self.assertEqual(result.index.names, index.names) # inplace df2 = df.copy() df2.set_index(['A', 'B'], inplace=True) assert_frame_equal(df2, expected) df3 = df.copy() df3.set_index(['A', 'B'], drop=False, inplace=True) assert_frame_equal(df3, expected_nodrop) # corner case with assertRaisesRegexp(ValueError, 'Index has duplicate keys'): df.set_index('A', verify_integrity=True) # append result = df.set_index(['A', 'B'], append=True) xp = df.reset_index().set_index(['index', 'A', 'B']) xp.index.names = [None, 'A', 'B'] assert_frame_equal(result, xp) # append to existing multiindex rdf = df.set_index(['A'], append=True) rdf = rdf.set_index(['B', 'C'], append=True) expected = df.set_index(['A', 'B', 'C'], append=True) assert_frame_equal(rdf, expected) # Series result = df.set_index(df.C) self.assertEqual(result.index.name, 'C') def test_set_index_nonuniq(self): df = DataFrame({'A': ['foo', 'foo', 'foo', 'bar', 'bar'], 'B': ['one', 'two', 'three', 'one', 'two'], 'C': ['a', 'b', 'c', 'd', 'e'], 'D': np.random.randn(5), 'E': np.random.randn(5)}) with assertRaisesRegexp(ValueError, 'Index has duplicate keys'): df.set_index('A', verify_integrity=True, inplace=True) self.assertIn('A', df) def test_set_index_bug(self): # GH1590 df = DataFrame({'val': 
[0, 1, 2], 'key': ['a', 'b', 'c']}) df2 = df.select(lambda indx: indx >= 1) rs = df2.set_index('key') xp = DataFrame({'val': [1, 2]}, Index(['b', 'c'], name='key')) assert_frame_equal(rs, xp) def test_set_index_pass_arrays(self): df = DataFrame({'A': ['foo', 'bar', 'foo', 'bar', 'foo', 'bar', 'foo', 'foo'], 'B': ['one', 'one', 'two', 'three', 'two', 'two', 'one', 'three'], 'C': np.random.randn(8), 'D': np.random.randn(8)}) # multiple columns result = df.set_index(['A', df['B'].values], drop=False) expected = df.set_index(['A', 'B'], drop=False) # TODO should set_index check_names ? assert_frame_equal(result, expected, check_names=False) def test_construction_with_categorical_index(self): ci = tm.makeCategoricalIndex(10) # with Categorical df = DataFrame({'A': np.random.randn(10), 'B': ci.values}) idf = df.set_index('B') str(idf) tm.assert_index_equal(idf.index, ci, check_names=False) self.assertEqual(idf.index.name, 'B') # from a CategoricalIndex df = DataFrame({'A': np.random.randn(10), 'B': ci}) idf = df.set_index('B') str(idf) tm.assert_index_equal(idf.index, ci, check_names=False) self.assertEqual(idf.index.name, 'B') idf = df.set_index('B').reset_index().set_index('B') str(idf) tm.assert_index_equal(idf.index, ci, check_names=False) self.assertEqual(idf.index.name, 'B') new_df = idf.reset_index() new_df.index = df.B tm.assert_index_equal(new_df.index, ci, check_names=False) self.assertEqual(idf.index.name, 'B') def test_set_index_cast_datetimeindex(self): df = DataFrame({'A': [datetime(2000, 1, 1) + timedelta(i) for i in range(1000)], 'B': np.random.randn(1000)}) idf = df.set_index('A') tm.assertIsInstance(idf.index, pd.DatetimeIndex) # don't cast a DatetimeIndex WITH a tz, leave as object # GH 6032 i = (pd.DatetimeIndex( pd.tseries.tools.to_datetime(['2013-1-1 13:00', '2013-1-2 14:00'], errors="raise")) .tz_localize('US/Pacific')) df = DataFrame(np.random.randn(2, 1), columns=['A']) expected = Series(np.array([pd.Timestamp('2013-01-01 13:00:00-0800', tz='US/Pacific'), pd.Timestamp('2013-01-02 14:00:00-0800', tz='US/Pacific')], dtype="object")) # convert index to series result = Series(i) assert_series_equal(result, expected) # assignt to frame df['B'] = i result = df['B'] assert_series_equal(result, expected, check_names=False) self.assertEqual(result.name, 'B') # keep the timezone result = i.to_series(keep_tz=True) assert_series_equal(result.reset_index(drop=True), expected) # convert to utc df['C'] = i.to_series().reset_index(drop=True) result = df['C'] comp = pd.DatetimeIndex(expected.values).copy() comp.tz = None self.assert_numpy_array_equal(result.values, comp.values) # list of datetimes with a tz df['D'] = i.to_pydatetime() result = df['D'] assert_series_equal(result, expected, check_names=False) self.assertEqual(result.name, 'D') # GH 6785 # set the index manually import pytz df = DataFrame( [{'ts': datetime(2014, 4, 1, tzinfo=pytz.utc), 'foo': 1}]) expected = df.set_index('ts') df.index = df['ts'] df.pop('ts') assert_frame_equal(df, expected) # GH 3950 # reset_index with single level for tz in ['UTC', 'Asia/Tokyo', 'US/Eastern']: idx = pd.date_range('1/1/2011', periods=5, freq='D', tz=tz, name='idx') df = pd.DataFrame( {'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, index=idx) expected = pd.DataFrame({'idx': [datetime(2011, 1, 1), datetime(2011, 1, 2), datetime(2011, 1, 3), datetime(2011, 1, 4), datetime(2011, 1, 5)], 'a': range(5), 'b': ['A', 'B', 'C', 'D', 'E']}, columns=['idx', 'a', 'b']) expected['idx'] = expected['idx'].apply( lambda d: pd.Timestamp(d, tz=tz)) 
assert_frame_equal(df.reset_index(), expected) def test_set_index_timezone(self): # GH 12358 # tz-aware Series should retain the tz i = pd.to_datetime(["2014-01-01 10:10:10"], utc=True).tz_convert('Europe/Rome') df = DataFrame({'i': i}) self.assertEqual(df.set_index(i).index[0].hour, 11) self.assertEqual(pd.DatetimeIndex(pd.Series(df.i))[0].hour, 11) self.assertEqual(df.set_index(df.i).index[0].hour, 11) def test_set_index_dst(self): di = pd.date_range('2006-10-29 00:00:00', periods=3, freq='H', tz='US/Pacific') df = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]}, index=di).reset_index() # single level res = df.set_index('index') exp = pd.DataFrame(data={'a': [0, 1, 2], 'b': [3, 4, 5]}, index=pd.Index(di, name='index')) tm.assert_frame_equal(res, exp) # GH 12920 res = df.set_index(['index', 'a']) exp_index = pd.MultiIndex.from_arrays([di, [0, 1, 2]], names=['index', 'a']) exp = pd.DataFrame({'b': [3, 4, 5]}, index=exp_index) tm.assert_frame_equal(res, exp) def test_set_index_multiindexcolumns(self): columns = MultiIndex.from_tuples([('foo', 1), ('foo', 2), ('bar', 1)]) df = DataFrame(np.random.randn(3, 3), columns=columns) rs = df.set_index(df.columns[0]) xp = df.ix[:, 1:] xp.index = df.ix[:, 0].values xp.index.names = [df.columns[0]] assert_frame_equal(rs, xp) def test_set_index_empty_column(self): # #1971 df = DataFrame([ dict(a=1, p=0), dict(a=2, m=10), dict(a=3, m=11, p=20), dict(a=4, m=12, p=21) ], columns=('a', 'm', 'p', 'x')) # it works! result = df.set_index(['a', 'x']) repr(result) def test_set_columns(self): cols = Index(np.arange(len(self.mixed_frame.columns))) self.mixed_frame.columns = cols with assertRaisesRegexp(ValueError, 'Length mismatch'): self.mixed_frame.columns = cols[::2] # Renaming def test_rename(self): mapping = { 'A': 'a', 'B': 'b', 'C': 'c', 'D': 'd' } renamed = self.frame.rename(columns=mapping) renamed2 = self.frame.rename(columns=str.lower) assert_frame_equal(renamed, renamed2) assert_frame_equal(renamed2.rename(columns=str.upper), self.frame, check_names=False) # index data = { 'A': {'foo': 0, 'bar': 1} } # gets sorted alphabetical df = DataFrame(data) renamed = df.rename(index={'foo': 'bar', 'bar': 'foo'}) tm.assert_index_equal(renamed.index, pd.Index(['foo', 'bar'])) renamed = df.rename(index=str.upper) tm.assert_index_equal(renamed.index, pd.Index(['BAR', 'FOO'])) # have to pass something self.assertRaises(TypeError, self.frame.rename) # partial columns renamed = self.frame.rename(columns={'C': 'foo', 'D': 'bar'}) tm.assert_index_equal(renamed.columns, pd.Index(['A', 'B', 'foo', 'bar'])) # other axis renamed = self.frame.T.rename(index={'C': 'foo', 'D': 'bar'}) tm.assert_index_equal(renamed.index, pd.Index(['A', 'B', 'foo', 'bar'])) # index with name index = Index(['foo', 'bar'], name='name') renamer = DataFrame(data, index=index) renamed = renamer.rename(index={'foo': 'bar', 'bar': 'foo'}) tm.assert_index_equal(renamed.index, pd.Index(['bar', 'foo'], name='name')) self.assertEqual(renamed.index.name, renamer.index.name) # MultiIndex tuples_index = [('foo1', 'bar1'), ('foo2', 'bar2')] tuples_columns = [('fizz1', 'buzz1'), ('fizz2', 'buzz2')] index = MultiIndex.from_tuples(tuples_index, names=['foo', 'bar']) columns = MultiIndex.from_tuples( tuples_columns, names=['fizz', 'buzz']) renamer = DataFrame([(0, 0), (1, 1)], index=index, columns=columns) renamed = renamer.rename(index={'foo1': 'foo3', 'bar2': 'bar3'}, columns={'fizz1': 'fizz3', 'buzz2': 'buzz3'}) new_index = MultiIndex.from_tuples([('foo3', 'bar1'), ('foo2', 'bar3')], names=['foo', 'bar'])
new_columns = MultiIndex.from_tuples([('fizz3', 'buzz1'), ('fizz2', 'buzz3')], names=['fizz', 'buzz']) self.assert_index_equal(renamed.index, new_index) self.assert_index_equal(renamed.columns, new_columns) self.assertEqual(renamed.index.names, renamer.index.names) self.assertEqual(renamed.columns.names, renamer.columns.names) def test_rename_nocopy(self): renamed = self.frame.rename(columns={'C': 'foo'}, copy=False) renamed['foo'] = 1. self.assertTrue((self.frame['C'] == 1.).all()) def test_rename_inplace(self): self.frame.rename(columns={'C': 'foo'}) self.assertIn('C', self.frame) self.assertNotIn('foo', self.frame) c_id = id(self.frame['C']) frame = self.frame.copy() frame.rename(columns={'C': 'foo'}, inplace=True) self.assertNotIn('C', frame) self.assertIn('foo', frame) self.assertNotEqual(id(frame['foo']), c_id) def test_rename_bug(self): # GH 5344 # rename set ref_locs, and set_index was not resetting df = DataFrame({0: ['foo', 'bar'], 1: ['bah', 'bas'], 2: [1, 2]}) df = df.rename(columns={0: 'a'}) df = df.rename(columns={1: 'b'}) df = df.set_index(['a', 'b']) df.columns = ['2001-01-01'] expected = DataFrame([[1], [2]], index=MultiIndex.from_tuples( [('foo', 'bah'), ('bar', 'bas')], names=['a', 'b']), columns=['2001-01-01']) assert_frame_equal(df, expected) def test_reorder_levels(self): index = MultiIndex(levels=[['bar'], ['one', 'two', 'three'], [0, 1]], labels=[[0, 0, 0, 0, 0, 0], [0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1]], names=['L0', 'L1', 'L2']) df = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=index) # no change, position result = df.reorder_levels([0, 1, 2]) assert_frame_equal(df, result) # no change, labels result = df.reorder_levels(['L0', 'L1', 'L2']) assert_frame_equal(df, result) # rotate, position result = df.reorder_levels([1, 2, 0]) e_idx = MultiIndex(levels=[['one', 'two', 'three'], [0, 1], ['bar']], labels=[[0, 1, 2, 0, 1, 2], [0, 1, 0, 1, 0, 1], [0, 0, 0, 0, 0, 0]], names=['L1', 'L2', 'L0']) expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=e_idx) assert_frame_equal(result, expected) result = df.reorder_levels([0, 0, 0]) e_idx = MultiIndex(levels=[['bar'], ['bar'], ['bar']], labels=[[0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0]], names=['L0', 'L0', 'L0']) expected = DataFrame({'A': np.arange(6), 'B': np.arange(6)}, index=e_idx) assert_frame_equal(result, expected) result = df.reorder_levels(['L0', 'L0', 'L0']) assert_frame_equal(result, expected) def test_reset_index(self): stacked = self.frame.stack()[::2] stacked = DataFrame({'foo': stacked, 'bar': stacked}) names = ['first', 'second'] stacked.index.names = names deleveled = stacked.reset_index() for i, (lev, lab) in enumerate(zip(stacked.index.levels, stacked.index.labels)): values = lev.take(lab) name = names[i] tm.assert_index_equal(values, Index(deleveled[name])) stacked.index.names = [None, None] deleveled2 = stacked.reset_index() tm.assert_series_equal(deleveled['first'], deleveled2['level_0'], check_names=False) tm.assert_series_equal(deleveled['second'], deleveled2['level_1'], check_names=False) # default name assigned rdf = self.frame.reset_index() exp = pd.Series(self.frame.index.values, name='index') self.assert_series_equal(rdf['index'], exp) # default name assigned, corner case df = self.frame.copy() df['index'] = 'foo' rdf = df.reset_index() exp = pd.Series(self.frame.index.values, name='level_0') self.assert_series_equal(rdf['level_0'], exp) # but this is ok self.frame.index.name = 'index' deleveled = self.frame.reset_index() 
self.assert_series_equal(deleveled['index'], pd.Series(self.frame.index)) self.assert_index_equal(deleveled.index, pd.Index(np.arange(len(deleveled)))) # preserve column names self.frame.columns.name = 'columns' resetted = self.frame.reset_index() self.assertEqual(resetted.columns.name, 'columns') # only remove certain columns frame = self.frame.reset_index().set_index(['index', 'A', 'B']) rs = frame.reset_index(['A', 'B']) # TODO should reset_index check_names ? assert_frame_equal(rs, self.frame, check_names=False) rs = frame.reset_index(['index', 'A', 'B']) assert_frame_equal(rs, self.frame.reset_index(), check_names=False) rs = frame.reset_index(['index', 'A', 'B']) assert_frame_equal(rs, self.frame.reset_index(), check_names=False) rs = frame.reset_index('A') xp = self.frame.reset_index().set_index(['index', 'B']) assert_frame_equal(rs, xp, check_names=False) # test resetting in place df = self.frame.copy() resetted = self.frame.reset_index() df.reset_index(inplace=True) assert_frame_equal(df, resetted, check_names=False) frame = self.frame.reset_index().set_index(['index', 'A', 'B']) rs = frame.reset_index('A', drop=True) xp = self.frame.copy() del xp['A'] xp = xp.set_index(['B'], append=True) assert_frame_equal(rs, xp, check_names=False) def test_reset_index_right_dtype(self): time = np.arange(0.0, 10, np.sqrt(2) / 2) s1 = Series((9.81 * time ** 2) / 2, index=Index(time, name='time'), name='speed') df = DataFrame(s1) resetted = s1.reset_index() self.assertEqual(resetted['time'].dtype, np.float64) resetted = df.reset_index() self.assertEqual(resetted['time'].dtype, np.float64) def test_reset_index_multiindex_col(self): vals = np.random.randn(3, 3).astype(object) idx = ['x', 'y', 'z'] full = np.hstack(([[x] for x in idx], vals)) df = DataFrame(vals, Index(idx, name='a'), columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']]) rs = df.reset_index() xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'], ['', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) rs = df.reset_index(col_fill=None) xp = DataFrame(full, columns=[['a', 'b', 'b', 'c'], ['a', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) rs = df.reset_index(col_level=1, col_fill='blah') xp = DataFrame(full, columns=[['blah', 'b', 'b', 'c'], ['a', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) df = DataFrame(vals, MultiIndex.from_arrays([[0, 1, 2], ['x', 'y', 'z']], names=['d', 'a']), columns=[['b', 'b', 'c'], ['mean', 'median', 'mean']]) rs = df.reset_index('a', ) xp = DataFrame(full, Index([0, 1, 2], name='d'), columns=[['a', 'b', 'b', 'c'], ['', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) rs = df.reset_index('a', col_fill=None) xp = DataFrame(full, Index(lrange(3), name='d'), columns=[['a', 'b', 'b', 'c'], ['a', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) rs = df.reset_index('a', col_fill='blah', col_level=1) xp = DataFrame(full, Index(lrange(3), name='d'), columns=[['blah', 'b', 'b', 'c'], ['a', 'mean', 'median', 'mean']]) assert_frame_equal(rs, xp) def test_reset_index_with_datetimeindex_cols(self): # GH5818 # df = pd.DataFrame([[1, 2], [3, 4]], columns=pd.date_range('1/1/2013', '1/2/2013'), index=['A', 'B']) result = df.reset_index() expected = pd.DataFrame([['A', 1, 2], ['B', 3, 4]], columns=['index', datetime(2013, 1, 1), datetime(2013, 1, 2)]) assert_frame_equal(result, expected) def test_reset_index_range(self): # GH 12071 df = pd.DataFrame([[0, 0], [1, 1]], columns=['A', 'B'], index=RangeIndex(stop=2)) result = df.reset_index() tm.assertIsInstance(result.index, RangeIndex) expected 
= pd.DataFrame([[0, 0, 0], [1, 1, 1]], columns=['index', 'A', 'B'], index=RangeIndex(stop=2)) assert_frame_equal(result, expected) def test_set_index_names(self): df = pd.util.testing.makeDataFrame() df.index.name = 'name' self.assertEqual(df.set_index(df.index).index.names, ['name']) mi = MultiIndex.from_arrays(df[['A', 'B']].T.values, names=['A', 'B']) mi2 = MultiIndex.from_arrays(df[['A', 'B', 'A', 'B']].T.values, names=['A', 'B', 'A', 'B']) df = df.set_index(['A', 'B']) self.assertEqual(df.set_index(df.index).index.names, ['A', 'B']) # Check that set_index isn't converting a MultiIndex into an Index self.assertTrue(isinstance(df.set_index(df.index).index, MultiIndex)) # Check actual equality tm.assert_index_equal(df.set_index(df.index).index, mi) # Check that [MultiIndex, MultiIndex] yields a MultiIndex rather # than a pair of tuples self.assertTrue(isinstance(df.set_index( [df.index, df.index]).index, MultiIndex)) # Check equality tm.assert_index_equal(df.set_index([df.index, df.index]).index, mi2) def test_rename_objects(self): renamed = self.mixed_frame.rename(columns=str.upper) self.assertIn('FOO', renamed) self.assertNotIn('foo', renamed) def test_assign_columns(self): self.frame['hi'] = 'there' frame = self.frame.copy() frame.columns = ['foo', 'bar', 'baz', 'quux', 'foo2'] assert_series_equal(self.frame['C'], frame['baz'], check_names=False) assert_series_equal(self.frame['hi'], frame['foo2'], check_names=False) def test_set_index_preserve_categorical_dtype(self): # GH13743, GH13854 df = DataFrame({'A': [1, 2, 1, 1, 2], 'B': [10, 16, 22, 28, 34], 'C1': pd.Categorical(list("abaab"), categories=list("bac"), ordered=False), 'C2': pd.Categorical(list("abaab"), categories=list("bac"), ordered=True)}) for cols in ['C1', 'C2', ['A', 'C1'], ['A', 'C2'], ['C1', 'C2']]: result = df.set_index(cols).reset_index() result = result.reindex(columns=df.columns) tm.assert_frame_equal(result, df)
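# Illustrative sketch (not part of the test suite): the set_index/reset_index
# round trip that many of the tests above exercise -- set_index() moves the
# named columns into a (Multi)Index and reset_index() turns them back into
# ordinary columns, so the original frame layout can be recovered.
def _example_set_index_roundtrip():
    df = DataFrame({'A': ['x', 'y'], 'B': [1, 2], 'C': [3.0, 4.0]})
    indexed = df.set_index(['A', 'B'])    # 'A' and 'B' become a MultiIndex
    restored = indexed.reset_index()      # ...and come back out as columns
    assert list(restored.columns) == ['A', 'B', 'C']
    return indexed, restored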
# Copyright 2016 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Proximal stochastic dual coordinate ascent optimizer for linear models.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections from six.moves import range from tensorflow.contrib.lookup import lookup_ops from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework.load_library import load_op_library from tensorflow.python.framework.ops import convert_to_tensor from tensorflow.python.framework.ops import name_scope from tensorflow.python.ops import array_ops from tensorflow.python.ops import control_flow_ops from tensorflow.python.ops import data_flow_ops from tensorflow.python.ops import logging_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_ops from tensorflow.python.ops import state_ops from tensorflow.python.ops import string_ops from tensorflow.python.ops import variables as var_ops from tensorflow.python.ops.nn import sigmoid_cross_entropy_with_logits from tensorflow.python.ops.sdca_ops import sdca_fprint from tensorflow.python.ops.sdca_ops import sdca_optimizer from tensorflow.python.ops.sdca_ops import sdca_shrink_l1 __all__ = ['SdcaModel'] class _ShardedMutableHashTable(lookup_ops.LookupInterface): """A sharded version of MutableHashTable. It is designed to be interface compatible with LookupInterface and MutableHashTable, with the exception of the export method, which is replaced by a custom values_reduce_sum method for SDCA needs. The class is not part of lookup ops because it is unclear how to make the device placement general enough to be useful. The _ShardedHashTable keeps `num_shards` MutableHashTables internally. If keys are integers, the shard is computed via the modulo operation. If keys are strings, the shard is computed via string_to_hash_bucket_fast. 
""" # TODO(andreasst): consider moving this to lookup_ops def __init__(self, key_dtype, value_dtype, default_value, num_shards=1, name='ShardedMutableHashTable'): with ops.name_scope(name, 'sharded_mutable_hash_table') as scope: super(_ShardedMutableHashTable, self).__init__(key_dtype, value_dtype, scope) table_shards = [] for i in range(num_shards): table_shards.append(lookup_ops.MutableHashTable( key_dtype=key_dtype, value_dtype=value_dtype, default_value=default_value, name='%s-%d-of-%d' % (name, i + 1, num_shards))) self._table_shards = table_shards # TODO(andreasst): add a value_shape() method to LookupInterface # pylint: disable=protected-access self._value_shape = self._table_shards[0]._value_shape # pylint: enable=protected-access @property def _num_shards(self): return len(self._table_shards) @property def table_shards(self): return self._table_shards def size(self, name=None): with ops.name_scope(name, 'sharded_mutable_hash_table_size'): sizes = [ self._table_shards[i].size() for i in range(self._num_shards) ] return math_ops.add_n(sizes) def _shard_indices(self, keys): if self._key_dtype == dtypes.string: indices = string_ops.string_to_hash_bucket_fast(keys, self._num_shards) else: indices = math_ops.mod(keys, self._num_shards) return math_ops.cast(indices, dtypes.int32) def lookup(self, keys, name=None): if keys.dtype != self._key_dtype: raise TypeError('Signature mismatch. Keys must be dtype %s, got %s.' % (self._key_dtype, keys.dtype)) num_shards = self._num_shards if num_shards == 1: return self._table_shards[0].lookup(keys, name=name) shard_indices = self._shard_indices(keys) # TODO(andreasst): support 'keys' that are not vectors key_shards = data_flow_ops.dynamic_partition(keys, shard_indices, num_shards) value_shards = [ self._table_shards[i].lookup(key_shards[i], name=name) for i in range(num_shards) ] original_indices = math_ops.range(array_ops.size(keys)) partitioned_indices = data_flow_ops.dynamic_partition(original_indices, shard_indices, num_shards) result = data_flow_ops.dynamic_stitch(partitioned_indices, value_shards) result.set_shape(keys.get_shape().concatenate(self._value_shape)) return result def insert(self, keys, values, name=None): num_shards = self._num_shards if num_shards == 1: return self._table_shards[0].insert(keys, values, name=name) shard_indices = self._shard_indices(keys) # TODO(andreasst): support 'keys' that are not vectors key_shards = data_flow_ops.dynamic_partition(keys, shard_indices, num_shards) value_shards = data_flow_ops.dynamic_partition(values, shard_indices, num_shards) return_values = [ self._table_shards[i].insert(key_shards[i], value_shards[i], name=name) for i in range(num_shards) ] return control_flow_ops.group(*return_values) def export_sharded(self, name=None): """Returns lists of the keys and values tensors in the sharded table. Returns: A pair of lists with the first list containing the key tensors and the second list containing the value tensors from each shard. """ keys_list = [] values_list = [] for table_shard in self._table_shards: exported_keys, exported_values = table_shard.export(name=name) keys_list.append(exported_keys) values_list.append(exported_values) return keys_list, values_list class SparseFeatureColumn(object): """Represents a sparse feature column. Contains three tensors representing a sparse feature column, they are example indices (int64), feature indices (int64), and feature values (float). Feature weights are optional, and are treated as 1.0f if missing. 
For example, consider a batch of 4 examples, which contains the following features in a particular SparseFeatureColumn: Example 0: feature 5, value 1 Example 1: feature 6, value 1 and feature 10, value 0.5 Example 2: no features Example 3: two copies of feature 2, value 1 This SparseFeatureColumn will be represented as follows: <0, 5, 1> <1, 6, 1> <1, 10, 0.5> <3, 2, 1> <3, 2, 1> For a batch of 2 examples below: Example 0: feature 5 Example 1: feature 6 is represented by SparseFeatureColumn as: <0, 5, 1> <1, 6, 1> ``` @@__init__ @@example_indices @@feature_indices @@feature_values """ def __init__(self, example_indices, feature_indices, feature_values): """Creates a `SparseFeatureColumn` representation. Args: example_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. feature_indices: A 1-D int64 tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. feature_values: An optional 1-D tensor float tensor of shape `[N]`. Also, accepts python lists, or numpy arrays. Returns: A `SparseFeatureColumn` """ with name_scope(None, 'SparseFeatureColumn', [example_indices, feature_indices]): self._example_indices = convert_to_tensor(example_indices, name='example_indices', dtype=dtypes.int64) self._feature_indices = convert_to_tensor(feature_indices, name='feature_indices', dtype=dtypes.int64) self._feature_values = None if feature_values is not None: with name_scope(None, 'SparseFeatureColumn', [feature_values]): self._feature_values = convert_to_tensor(feature_values, name='feature_values', dtype=dtypes.float32) @property def example_indices(self): """The example indices represented as a dense tensor. Returns: A 1-D Tensor of int64 with shape `[N]`. """ return self._example_indices @property def feature_indices(self): """The feature indices represented as a dense tensor. Returns: A 1-D Tensor of int64 with shape `[N]`. """ return self._feature_indices @property def feature_values(self): """The feature values represented as a dense tensor. Returns: May return None, or a 1-D Tensor of float32 with shape `[N]`. """ return self._feature_values # TODO(sibyl-Aix6ihai): add name_scope to appropriate methods. class SdcaModel(object): """Stochastic dual coordinate ascent solver for linear models. This class currently only supports a single machine (multi-threaded) implementation. We expect the weights and duals to fit in a single machine. Loss functions supported: * Binary logistic loss * Squared loss * Hinge loss * Smooth hinge loss This class defines an optimizer API to train a linear model. ### Usage ```python # Create a solver with the desired parameters. lr = tf.contrib.linear_optimizer.SdcaModel(examples, variables, options) opt_op = lr.minimize() predictions = lr.predictions(examples) # Primal loss + L1 loss + L2 loss. regularized_loss = lr.regularized_loss(examples) # Primal loss only unregularized_loss = lr.unregularized_loss(examples) examples: { sparse_features: list of SparseFeatureColumn. dense_features: list of dense tensors of type float32. example_labels: a tensor of type float32 and shape [Num examples] example_weights: a tensor of type float32 and shape [Num examples] example_ids: a tensor of type string and shape [Num examples] } variables: { sparse_features_weights: list of tensors of shape [vocab size] dense_features_weights: list of tensors of shape [dense_feature_dimension] } options: { symmetric_l1_regularization: 0.0 symmetric_l2_regularization: 1.0 loss_type: "logistic_loss" num_loss_partitions: 1 (Optional, with default value of 1. 
Number of partitions of the global loss function, 1 means single machine solver, and >1 when we have more than one optimizer working concurrently.) num_table_shards: 1 (Optional, with default value of 1. Number of shards of the internal state table, typically set to match the number of parameter servers for large data sets. } ``` In the training program you will just have to run the returned Op from minimize(). ```python # Execute opt_op and train for num_steps. for _ in range(num_steps): opt_op.run() # You can also check for convergence by calling lr.approximate_duality_gap() ``` """ def __init__(self, examples, variables, options): """Create a new sdca optimizer.""" if not examples or not variables or not options: raise ValueError('examples, variables and options must all be specified.') supported_losses = ('logistic_loss', 'squared_loss', 'hinge_loss', 'smooth_hinge_loss') if options['loss_type'] not in supported_losses: raise ValueError('Unsupported loss_type: ', options['loss_type']) self._assertSpecified(['example_labels', 'example_weights', 'example_ids', 'sparse_features', 'dense_features'], examples) self._assertList(['sparse_features', 'dense_features'], examples) self._assertSpecified(['sparse_features_weights', 'dense_features_weights'], variables) self._assertList(['sparse_features_weights', 'dense_features_weights'], variables) self._assertSpecified(['loss_type', 'symmetric_l2_regularization', 'symmetric_l1_regularization'], options) for name in ['symmetric_l1_regularization', 'symmetric_l2_regularization']: value = options[name] if value < 0.0: raise ValueError('%s should be non-negative. Found (%f)' % (name, value)) self._examples = examples self._variables = variables self._options = options self._create_slots() self._hashtable = _ShardedMutableHashTable( key_dtype=dtypes.string, value_dtype=dtypes.float32, num_shards=self._num_table_shards(), default_value=[0.0, 0.0, 0.0, 0.0]) logging_ops.scalar_summary('approximate_duality_gap', self.approximate_duality_gap()) logging_ops.scalar_summary('examples_seen', self._hashtable.size()) def _symmetric_l1_regularization(self): return self._options['symmetric_l1_regularization'] def _symmetric_l2_regularization(self): # Algorithmic requirement (for now) is to have minimal l2 of 1.0. return max(self._options['symmetric_l2_regularization'], 1.0) def _num_loss_partitions(self): # Number of partitions of the global objective. # TODO(andreasst): set num_loss_partitions automatically based on the number # of workers return self._options.get('num_loss_partitions', 1) def _num_table_shards(self): # Number of hash table shards. # Return 1 if not specified or if the value is 'None' # TODO(andreasst): set num_table_shards automatically based on the number # of parameter servers num_shards = self._options.get('num_table_shards') return 1 if num_shards is None else num_shards # TODO(sibyl-Aix6ihai): Use optimizer interface to make use of slot creation logic. def _create_slots(self): # Make internal variables which have the updates before applying L1 # regularization. 
self._slots = collections.defaultdict(list) for name in ['sparse_features_weights', 'dense_features_weights']: for var in self._variables[name]: with ops.device(var.device): # TODO(andreasst): remove SDCAOptimizer suffix once bug 30843109 is # fixed self._slots['unshrinked_' + name].append(var_ops.Variable( array_ops.zeros_like(var.initialized_value(), dtypes.float32), name=var.op.name + '_unshrinked/SDCAOptimizer')) def _assertSpecified(self, items, check_in): for x in items: if check_in[x] is None: raise ValueError(check_in[x] + ' must be specified.') def _assertList(self, items, check_in): for x in items: if not isinstance(check_in[x], list): raise ValueError(x + ' must be a list.') def _l1_loss(self): """Computes the (un-normalized) l1 loss of the model.""" with name_scope('sdca/l1_loss'): sums = [] for name in ['sparse_features_weights', 'dense_features_weights']: for weights in self._convert_n_to_tensor(self._variables[name]): with ops.device(weights.device): sums.append( math_ops.reduce_sum( math_ops.abs(math_ops.cast(weights, dtypes.float64)))) sum = math_ops.add_n(sums) # SDCA L1 regularization cost is: l1 * sum(|weights|) return self._options['symmetric_l1_regularization'] * sum def _l2_loss(self, l2): """Computes the (un-normalized) l2 loss of the model.""" with name_scope('sdca/l2_loss'): sums = [] for name in ['sparse_features_weights', 'dense_features_weights']: for weights in self._convert_n_to_tensor(self._variables[name]): with ops.device(weights.device): sums.append( math_ops.reduce_sum( math_ops.square(math_ops.cast(weights, dtypes.float64)))) sum = math_ops.add_n(sums) # SDCA L2 regularization cost is: l2 * sum(weights^2) / 2 return l2 * sum / 2.0 def _convert_n_to_tensor(self, input_list, as_ref=False): """Converts input list to a set of tensors.""" return [convert_to_tensor(x, as_ref=as_ref) for x in input_list] def _linear_predictions(self, examples): """Returns predictions of the form w*x.""" with name_scope('sdca/prediction'): sparse_variables = self._convert_n_to_tensor(self._variables[ 'sparse_features_weights']) result = 0.0 for sfc, sv in zip(examples['sparse_features'], sparse_variables): # TODO(sibyl-Aix6ihai): following does not take care of missing features. result += math_ops.segment_sum( math_ops.mul( array_ops.gather(sv, sfc.feature_indices), sfc.feature_values), sfc.example_indices) dense_features = self._convert_n_to_tensor(examples['dense_features']) dense_variables = self._convert_n_to_tensor(self._variables[ 'dense_features_weights']) for i in range(len(dense_variables)): result += math_ops.matmul(dense_features[i], array_ops.expand_dims( dense_variables[i], -1)) # Reshaping to allow shape inference at graph construction time. return array_ops.reshape(result, [-1]) def predictions(self, examples): """Add operations to compute predictions by the model. If logistic_loss is being used, predicted probabilities are returned. Otherwise, (raw) linear predictions (w*x) are returned. Args: examples: Examples to compute predictions on. Returns: An Operation that computes the predictions for examples. Raises: ValueError: if examples are not well defined. """ self._assertSpecified( ['example_weights', 'sparse_features', 'dense_features'], examples) self._assertList(['sparse_features', 'dense_features'], examples) result = self._linear_predictions(examples) if self._options['loss_type'] == 'logistic_loss': # Convert logits to probability for logistic loss predictions. 
with name_scope('sdca/logistic_prediction'): result = math_ops.sigmoid(result) return result def minimize(self, global_step=None, name=None): """Add operations to train a linear model by minimizing the loss function. Args: global_step: Optional `Variable` to increment by one after the variables have been updated. name: Optional name for the returned operation. Returns: An Operation that updates the variables passed in the constructor. """ # Technically, the op depends on a lot more than the variables, # but we'll keep the list short. with name_scope(name, 'sdca/minimize'): sparse_example_indices = [] sparse_feature_indices = [] sparse_features_values = [] for sf in self._examples['sparse_features']: sparse_example_indices.append(sf.example_indices) sparse_feature_indices.append(sf.feature_indices) # If feature values are missing, sdca assumes a value of 1.0f. if sf.feature_values is not None: sparse_features_values.append(sf.feature_values) example_ids_hashed = sdca_fprint( convert_to_tensor(self._examples['example_ids'])) example_state_data = self._hashtable.lookup(example_ids_hashed) # Solver returns example_state_update, new delta sparse_feature_weights # and delta dense_feature_weights. weights_tensor = self._convert_n_to_tensor(self._slots[ 'unshrinked_sparse_features_weights']) sparse_weights = [] sparse_indices = [] for w, i in zip(weights_tensor, sparse_feature_indices): # Find the feature ids to lookup in the variables. with ops.device(w.device): sparse_indices.append( math_ops.cast( array_ops.unique(math_ops.cast(i, dtypes.int32))[0], dtypes.int64)) sparse_weights.append(array_ops.gather(w, sparse_indices[-1])) esu, sfw, dfw = sdca_optimizer( sparse_example_indices, sparse_feature_indices, sparse_features_values, self._convert_n_to_tensor(self._examples['dense_features']), convert_to_tensor(self._examples['example_weights']), convert_to_tensor(self._examples['example_labels']), sparse_indices, sparse_weights, self._convert_n_to_tensor(self._slots[ 'unshrinked_dense_features_weights']), example_state_data, loss_type=self._options['loss_type'], l1=self._options['symmetric_l1_regularization'], l2=self._symmetric_l2_regularization(), num_loss_partitions=self._num_loss_partitions(), num_inner_iterations=1) with ops.control_dependencies([esu]): update_ops = [self._hashtable.insert(example_ids_hashed, esu)] # Update the weights before the proximal step. for w, i, u in zip(self._slots['unshrinked_sparse_features_weights'], sparse_indices, sfw): update_ops.append(state_ops.scatter_add(w, i, u)) for w, u in zip(self._slots['unshrinked_dense_features_weights'], dfw): update_ops.append(w.assign_add(u)) with ops.control_dependencies(update_ops): update_ops = [] # Copy over unshrinked weights to user provided variables. for i, name in enumerate( ['sparse_features_weights', 'dense_features_weights']): for var, slot_var in zip(self._variables[name], self._slots['unshrinked_' + name]): update_ops.append(var.assign(slot_var)) update_group = control_flow_ops.group(*update_ops) # Apply proximal step. 
with ops.control_dependencies([update_group]): shrink_ops = [] for name in ['sparse_features_weights', 'dense_features_weights']: for var in self._variables[name]: with ops.device(var.device): shrink_ops.append( sdca_shrink_l1( self._convert_n_to_tensor( [var], as_ref=True), l1=self._symmetric_l1_regularization(), l2=self._symmetric_l2_regularization())) shrink_l1 = control_flow_ops.group(*shrink_ops) if not global_step: return shrink_l1 with ops.control_dependencies([shrink_l1]): return state_ops.assign_add(global_step, 1, name=name).op def approximate_duality_gap(self): """Add operations to compute the approximate duality gap. Returns: An Operation that computes the approximate duality gap over all examples. """ with name_scope('sdca/approximate_duality_gap'): _, values_list = self._hashtable.export_sharded() shard_sums = [] for values in values_list: with ops.device(values.device): shard_sums.append( math_ops.reduce_sum(math_ops.cast(values, dtypes.float64), 0)) summed_values = math_ops.add_n(shard_sums) primal_loss = summed_values[1] dual_loss = summed_values[2] example_weights = summed_values[3] # Note: we return NaN if there are no weights or all weights are 0, e.g. # if no examples have been processed return (primal_loss + dual_loss + self._l1_loss() + (2.0 * self._l2_loss(self._symmetric_l2_regularization())) ) / example_weights def unregularized_loss(self, examples): """Add operations to compute the loss (without the regularization loss). Args: examples: Examples to compute unregularized loss on. Returns: An Operation that computes mean (unregularized) loss for given set of examples. Raises: ValueError: if examples are not well defined. """ self._assertSpecified(['example_labels', 'example_weights', 'sparse_features', 'dense_features'], examples) self._assertList(['sparse_features', 'dense_features'], examples) with name_scope('sdca/unregularized_loss'): predictions = math_ops.cast( self._linear_predictions(examples), dtypes.float64) labels = math_ops.cast( convert_to_tensor(examples['example_labels']), dtypes.float64) weights = math_ops.cast( convert_to_tensor(examples['example_weights']), dtypes.float64) if self._options['loss_type'] == 'logistic_loss': return math_ops.reduce_sum(math_ops.mul( sigmoid_cross_entropy_with_logits(predictions, labels), weights)) / math_ops.reduce_sum(weights) if self._options['loss_type'] in ['hinge_loss', 'smooth_hinge_loss']: # hinge_loss = max{0, 1 - y_i w*x} where y_i \in {-1, 1}. So, we need to # first convert 0/1 labels into -1/1 labels. all_ones = array_ops.ones_like(predictions) adjusted_labels = math_ops.sub(2 * labels, all_ones) # Tensor that contains (unweighted) error (hinge loss) per # example. error = nn_ops.relu(math_ops.sub(all_ones, math_ops.mul(adjusted_labels, predictions))) weighted_error = math_ops.mul(error, weights) return math_ops.reduce_sum(weighted_error) / math_ops.reduce_sum( weights) # squared loss err = math_ops.sub(labels, predictions) weighted_squared_err = math_ops.mul(math_ops.square(err), weights) # SDCA squared loss function is sum(err^2) / (2*sum(weights)) return (math_ops.reduce_sum(weighted_squared_err) / (2.0 * math_ops.reduce_sum(weights))) def regularized_loss(self, examples): """Add operations to compute the loss with regularization loss included. Args: examples: Examples to compute loss on. Returns: An Operation that computes mean (regularized) loss for given set of examples. Raises: ValueError: if examples are not well defined. 
""" self._assertSpecified(['example_labels', 'example_weights', 'sparse_features', 'dense_features'], examples) self._assertList(['sparse_features', 'dense_features'], examples) with name_scope('sdca/regularized_loss'): weights = convert_to_tensor(examples['example_weights']) return (( self._l1_loss() + # Note that here we are using the raw regularization # (as specified by the user) and *not* # self._symmetric_l2_regularization(). self._l2_loss(self._options['symmetric_l2_regularization'])) / math_ops.reduce_sum(math_ops.cast(weights, dtypes.float64)) + self.unregularized_loss(examples))
from __future__ import print_function, unicode_literals import re from aspen import Response import pytest from gratipay.security.user import SESSION from gratipay.testing import Harness from gratipay.wireup import find_files overescaping_re = re.compile(r'&amp;(#[0-9]{4}|[a-z]+);') class TestPages(Harness): def browse(self, setup=None, **kw): alice = self.make_participant('alice', claimed_time='now') exchange_id = self.make_exchange('braintree-cc', 19, 0, alice) if setup: setup(alice) i = len(self.client.www_root) urls = [] for spt in find_files(self.client.www_root, '*.spt'): url = spt[i:-4].replace('/%team/', '/alice/') \ .replace('/alice/%sub', '/alice/foo') \ .replace('/~/%username/', '/~alice/') \ .replace('/for/%slug/', '/for/wonderland/') \ .replace('/%platform/', '/github/') \ .replace('/%user_name/', '/gratipay/') \ .replace('/%membername', '/alan') \ .replace('/%exchange_id.int', '/%s' % exchange_id) \ .replace('/%redirect_to', '/giving') \ .replace('/%endpoint', '/public') \ .replace('/about/me/%sub', '/about/me') assert '/%' not in url if 'index' in url.split('/')[-1]: url = url.rsplit('/', 1)[0] + '/' urls.append(url) urls.extend(""" /about/me /about/me/ /about/me/history """.split()) for url in urls: try: r = self.client.GET(url, **kw) except Response as r: if r.code == 404 or r.code >= 500: raise assert r.code != 404 assert r.code < 500 assert not overescaping_re.search(r.body.decode('utf8')) def test_anon_can_browse(self): self.browse() def test_new_participant_can_browse(self): self.browse(auth_as='alice') def test_on_the_fence_can_browse(self): def setup(alice): alice.update_is_free_rider(None) self.browse(setup, auth_as='alice') @pytest.mark.xfail(reason="migrating to Teams; #3399") def test_username_is_in_button(self): self.make_participant('alice', claimed_time='now') self.make_participant('bob', claimed_time='now') body = self.client.GET('/~alice/', auth_as='bob').body assert '<span class="zero">Give to alice</span>' in body @pytest.mark.xfail(reason="migrating to Teams; #3399") def test_username_is_in_unauth_giving_cta(self): self.make_participant('alice', claimed_time='now') body = self.client.GET('/~alice/').body assert 'give to alice' in body def test_widget(self): self.make_participant('cheese', claimed_time='now') expected = "javascript: window.open" actual = self.client.GET('/~cheese/widget.html').body assert expected in actual def test_github_associate(self): assert self.client.GxT('/on/github/associate').code == 400 def test_twitter_associate(self): assert self.client.GxT('/on/twitter/associate').code == 400 def test_about(self): expected = "We provide voluntary" actual = self.client.GET('/about/').body assert expected in actual def test_about_stats(self): expected = "Gratipay processes" actual = self.client.GET('/about/stats').body assert expected in actual def test_about_charts(self): assert self.client.GxT('/about/charts.html').code == 302 def test_about_teams_redirect(self): assert self.client.GxT('/about/teams/').code == 302 assert self.client.GxT('/about/features/teams/').code == 302 def test_about_payments(self): assert "Payments" in self.client.GET('/about/features/payments').body.decode('utf8') def test_about_payroll(self): assert "Payroll" in self.client.GET('/about/features/payroll').body.decode('utf8') def test_404(self): response = self.client.GET('/about/four-oh-four.html', raise_immediately=False) assert "Not Found" in response.body assert "{%" not in response.body def test_for_contributors_redirects_to_inside_gratipay(self): loc = 
self.client.GxT('/for/contributors/').headers['Location'] assert loc == 'http://inside.gratipay.com/' def test_mission_statement_also_redirects(self): assert self.client.GxT('/for/contributors/mission-statement.html').code == 302 def test_anonymous_sign_out_redirects(self): response = self.client.PxST('/sign-out.html') assert response.code == 302 assert response.headers['Location'] == '/' def test_sign_out_overwrites_session_cookie(self): self.make_participant('alice') response = self.client.PxST('/sign-out.html', auth_as='alice') assert response.code == 302 assert response.headers.cookie[SESSION].value == '' def test_sign_out_doesnt_redirect_xhr(self): self.make_participant('alice') response = self.client.PxST('/sign-out.html', auth_as='alice', HTTP_X_REQUESTED_WITH=b'XMLHttpRequest') assert response.code == 200 def test_settings_page_available_balance(self): self.make_participant('alice', claimed_time='now') self.db.run("UPDATE participants SET balance = 123.00 WHERE username = 'alice'") actual = self.client.GET("/~alice/settings/", auth_as="alice").body expected = "123" assert expected in actual def test_giving_page(self): self.make_team(is_approved=True) alice = self.make_participant('alice', claimed_time='now') alice.set_payment_instruction('TheEnterprise', "1.00") assert "The Enterprise" in self.client.GET("/~alice/giving/", auth_as="alice").body def test_giving_page_shows_cancelled(self): self.make_team(is_approved=True) alice = self.make_participant('alice', claimed_time='now') alice.set_payment_instruction('TheEnterprise', "1.00") alice.set_payment_instruction('TheEnterprise', "0.00") assert "Cancelled" in self.client.GET("/~alice/giving/", auth_as="alice").body def test_new_participant_can_edit_profile(self): self.make_participant('alice', claimed_time='now') body = self.client.GET("/~alice/", auth_as="alice").body assert b'Edit' in body def test_tilde_slash_redirects_to_tilde(self): self.make_participant('alice', claimed_time='now') response = self.client.GxT("/~/alice/", auth_as="alice") assert response.code == 302 assert response.headers['Location'] == '/~alice/' def test_tilde_slash_redirects_subpages_with_querystring_to_tilde(self): self.make_participant('alice', claimed_time='now') response = self.client.GxT("/~/alice/foo/bar?baz=buz", auth_as="alice") assert response.code == 302 assert response.headers['Location'] == '/~alice/foo/bar?baz=buz' def test_username_redirected_to_tilde(self): self.make_participant('alice', claimed_time='now') response = self.client.GxT("/alice/", auth_as="alice") assert response.code == 302 assert response.headers['Location'] == '/~alice/' def test_username_redirects_everything_to_tilde(self): self.make_participant('alice', claimed_time='now') response = self.client.GxT("/alice/foo/bar?baz=buz", auth_as="alice") assert response.code == 302 assert response.headers['Location'] == '/~alice/foo/bar?baz=buz' def test_team_slug__not__redirected_from_tilde(self): self.make_team(is_approved=True) assert self.client.GET("/TheEnterprise/").code == 200 assert self.client.GxT("/~TheEnterprise/").code == 404 def test_security_headers_sets_x_frame_options(self): headers = self.client.GET('/about/').headers assert headers['X-Frame-Options'] == 'SAMEORIGIN' def test_security_headers_sets_x_content_type_options(self): headers = self.client.GET('/about/').headers assert headers['X-Content-Type-Options'] == 'nosniff' def test_security_headers_sets_x_xss_protection(self): headers = self.client.GET('/about/').headers assert headers['X-XSS-Protection'] == '1; 
mode=block'
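# ---------------------------------------------------------------------------
# Note (added for clarity; not part of the original test module): `browse`
# above walks every simulateable *.spt path under www_root, substitutes the
# URL placeholders (e.g. '/%platform/%user_name/' becomes '/github/gratipay/'),
# GETs each resulting URL, and fails on 404s, 5xx responses, or over-escaped
# HTML entities. A hypothetical additional smoke test would follow the same
# pattern as the tests above:
#
#   def test_browse_with_a_cancelled_payment_instruction(self):
#       def setup(alice):
#           self.make_team(is_approved=True)
#           alice.set_payment_instruction('TheEnterprise', '1.00')
#           alice.set_payment_instruction('TheEnterprise', '0.00')
#       self.browse(setup, auth_as='alice')
# ---------------------------------------------------------------------------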
""" Zappa Async Tasks Example: ``` from zappa.async import task @task(service='sns') def my_async_func(*args, **kwargs): dosomething() ``` For SNS, you can also pass an `arn` argument to task() which will specify which SNS path to send it to. Without `service='sns'`, the default service is 'lambda' which will call the method in an asynchronous lambda call. The following restrictions apply: * function must have a clean import path -- i.e. no closures, lambdas, or methods. * args and kwargs must be JSON-serializable. * The JSON-serialized form must be within the size limits for Lambda (128K) or SNS (256K) events. Discussion of this comes from: https://github.com/Miserlou/Zappa/issues/61 https://github.com/Miserlou/Zappa/issues/603 https://github.com/Miserlou/Zappa/pull/694 https://github.com/Miserlou/Zappa/pull/732 https://github.com/Miserlou/Zappa/issues/840 ## Full lifetime of an asynchronous dispatch: 1. In a file called `foo.py`, there is the following code: ``` from zappa.async import task @task def my_async_func(*args, **kwargs): return sum(args) ``` 2. The decorator desugars to: `my_async_func = task(my_async_func)` 3. Somewhere else, the code runs: `res = my_async_func(1,2)` really calls task's `_run_async(1,2)` with `func` equal to the original `my_async_func` If we are running in Lambda, this runs: LambdaAsyncResponse().send('foo.my_async_func', (1,2), {}) and returns the LambdaAsyncResponse instance to the local context. That local context, can, e.g. test for `res.sent` to confirm it was dispatched correctly. 4. LambdaAsyncResponse.send invoked the currently running AWS Lambda instance with the json message: ``` { "command": "zappa.async.route_lambda_task", "task_path": "foo.my_async_func", "args": [1,2], "kwargs": {} } ``` 5. The new lambda instance is invoked with the message above, and Zappa runs its usual bootstrapping context, and inside zappa.handler, the existance of the 'command' key in the message dispatches the full message to zappa.async.route_lambda_task, which in turn calls `run_message(message)` 6. `run_message` loads the task_path value to load the `func` from `foo.py`. We should note that my_async_func is wrapped by @task in this new context, as well. However, @task also decorated `my_async_func.sync()` to run the original function synchronously. `run_message` duck-types the method and finds the `.sync` attribute and runs that instead -- thus we do not infinitely dispatch. If `my_async_func` had code to dispatch other functions inside its synchronous portions (or even call itself recursively), those *would* be dispatched asynchronously, unless, of course, they were called by: `my_async_func.sync(1,2)` in which case it would run synchronously and in the current lambda function. """ import boto3 import botocore from functools import update_wrapper, wraps import importlib import inspect import json import os import uuid import time from .utilities import get_topic_name try: from zappa_settings import ASYNC_RESPONSE_TABLE except ImportError: ASYNC_RESPONSE_TABLE = None # Declare these here so they're kept warm. try: aws_session = boto3.Session() LAMBDA_CLIENT = aws_session.client('lambda') SNS_CLIENT = aws_session.client('sns') STS_CLIENT = aws_session.client('sts') DYNAMODB_CLIENT = aws_session.client('dynamodb') except botocore.exceptions.NoRegionError as e: # pragma: no cover # This can happen while testing on Travis, but it's taken care of # during class initialization. 
pass ## # Response and Exception classes ## class AsyncException(Exception): # pragma: no cover """ Simple exception class for async tasks. """ pass class LambdaAsyncResponse(object): """ Base Response Dispatcher class Can be used directly or subclassed if the method to send the message is changed. """ def __init__(self, lambda_function_name=None, aws_region=None, capture_response=False, **kwargs): """ """ if kwargs.get('boto_session'): self.client = kwargs.get('boto_session').client('lambda') else: # pragma: no cover self.client = LAMBDA_CLIENT self.lambda_function_name = lambda_function_name self.aws_region = aws_region if capture_response: if ASYNC_RESPONSE_TABLE is None: print( "Warning! Attempted to capture a response without " "async_response_table configured in settings (you won't " "capture async responses)." ) capture_response = False self.response_id = "MISCONFIGURED" else: self.response_id = str(uuid.uuid4()) else: self.response_id = None self.capture_response = capture_response def send(self, task_path, args, kwargs): """ Create the message object and pass it to the actual sender. """ message = { 'task_path': task_path, 'capture_response': self.capture_response, 'response_id': self.response_id, 'args': args, 'kwargs': kwargs } self._send(message) return self def _send(self, message): """ Given a message, directly invoke the lamdba function for this task. """ message['command'] = 'zappa.async.route_lambda_task' payload = json.dumps(message).encode('utf-8') if len(payload) > 128000: # pragma: no cover raise AsyncException("Payload too large for async Lambda call") self.response = self.client.invoke( FunctionName=self.lambda_function_name, InvocationType='Event', #makes the call async Payload=payload ) self.sent = (self.response.get('StatusCode', 0) == 202) class SnsAsyncResponse(LambdaAsyncResponse): """ Send a SNS message to a specified SNS topic Serialise the func path and arguments """ def __init__(self, lambda_function_name=None, aws_region=None, capture_response=False, **kwargs): self.lambda_function_name = lambda_function_name self.aws_region=aws_region if kwargs.get('boto_session'): self.client = kwargs.get('boto_session').client('sns') else: # pragma: no cover self.client = SNS_CLIENT if kwargs.get('arn'): self.arn = kwargs.get('arn') else: if kwargs.get('boto_session'): sts_client = kwargs.get('boto_session').client('sts') else: sts_client = STS_CLIENT AWS_ACCOUNT_ID = sts_client.get_caller_identity()['Account'] self.arn = 'arn:aws:sns:{region}:{account}:{topic_name}'.format( region=self.aws_region, account=AWS_ACCOUNT_ID, topic_name=get_topic_name(self.lambda_function_name) ) self.capture_response = capture_response if capture_response: self.response_id = str(uuid.uuid4()) def _send(self, message): """ Given a message, publish to this topic. 
""" message['command'] = 'zappa.async.route_sns_task' payload = json.dumps(message).encode('utf-8') if len(payload) > 256000: # pragma: no cover raise AsyncException("Payload too large for SNS") self.response = self.client.publish( TargetArn=self.arn, Message=payload ) self.sent = self.response.get('MessageId') ## # Aync Routers ## ASYNC_CLASSES = { 'lambda': LambdaAsyncResponse, 'sns': SnsAsyncResponse, } def route_lambda_task(event, context): """ Deserialises the message from event passed to zappa.handler.run_function imports the function, calls the function with args """ message = event return run_message(message) def route_sns_task(event, context): """ Gets SNS Message, deserialises the message, imports the function, calls the function with args """ record = event['Records'][0] message = json.loads( record['Sns']['Message'] ) return run_message(message) def run_message(message): """ Runs a function defined by a message object with keys: 'task_path', 'args', and 'kwargs' used by lambda routing and a 'command' in handler.py """ if message.get('capture_response', False): DYNAMODB_CLIENT.put_item( TableName=ASYNC_RESPONSE_TABLE, Item={ 'id': {'S': str(message['response_id'])}, 'ttl': {'N': str(int(time.time()+600))}, 'async_status': {'S': 'in progress'}, 'async_response': {'S': str(json.dumps('N/A'))}, } ) func = import_and_get_task(message['task_path']) if hasattr(func, 'sync'): response = func.sync( *message['args'], **message['kwargs'] ) else: response = func( *message['args'], **message['kwargs'] ) if message.get('capture_response', False): DYNAMODB_CLIENT.update_item( TableName=ASYNC_RESPONSE_TABLE, Key={'id': {'S': str(message['response_id'])}}, UpdateExpression="SET async_response = :r, async_status = :s", ExpressionAttributeValues={ ':r': {'S': str(json.dumps(response))}, ':s': {'S': 'complete'}, }, ) return response ## # Execution interfaces and classes ## def run(func, args=[], kwargs={}, service='lambda', capture_response=False, remote_aws_lambda_function_name=None, remote_aws_region=None, **task_kwargs): """ Instead of decorating a function with @task, you can just run it directly. If you were going to do func(*args, **kwargs), then you will call this: import zappa.async.run zappa.async.run(func, args, kwargs) If you want to use SNS, then do: zappa.async.run(func, args, kwargs, service='sns') and other arguments are similar to @task """ lambda_function_name = remote_aws_lambda_function_name or os.environ.get('AWS_LAMBDA_FUNCTION_NAME') aws_region = remote_aws_region or os.environ.get('AWS_REGION') task_path = get_func_task_path(func) return ASYNC_CLASSES[service](lambda_function_name=lambda_function_name, aws_region=aws_region, capture_response=capture_response, **task_kwargs).send(task_path, args, kwargs) # Handy: # http://stackoverflow.com/questions/10294014/python-decorator-best-practice-using-a-class-vs-a-function # However, this needs to pass inspect.getargspec() in handler.py which does not take classes # Wrapper written to take optional arguments # http://chase-seibert.github.io/blog/2013/12/17/python-decorator-optional-parameter.html def task(*args, **kwargs): """Async task decorator so that running Args: func (function): the function to be wrapped Further requirements: func must be an independent top-level function. i.e. 
not a class method or an anonymous function service (str): either 'lambda' or 'sns' remote_aws_lambda_function_name (str): the name of a remote lambda function to call with this task remote_aws_region (str): the name of a remote region to make lambda/sns calls against Returns: A replacement function that dispatches func() to run asynchronously through the service in question """ func = None if len(args) == 1 and callable(args[0]): func = args[0] if func: # Default Values service = 'lambda' lambda_function_name = os.environ.get('AWS_LAMBDA_FUNCTION_NAME') aws_region = os.environ.get('AWS_REGION') else: # Arguments were passed service = kwargs.get('service', 'lambda') lambda_function_name = kwargs.get('remote_aws_lambda_function_name') or os.environ.get('AWS_LAMBDA_FUNCTION_NAME') aws_region = kwargs.get('remote_aws_region') or os.environ.get('AWS_REGION') capture_response = kwargs.get('capture_response', False) def func_wrapper(func): task_path = get_func_task_path(func) @wraps(func) def _run_async(*args, **kwargs): """ This is the wrapping async function that replaces the function that is decorated with @task. Args: These are just passed through to @task's func Assuming a valid service is passed to task() and it is run inside a Lambda process (i.e. AWS_LAMBDA_FUNCTION_NAME exists), it dispatches the function to be run through the service variable. Otherwise, it runs the task synchronously. Returns: In async mode, the object returned includes state of the dispatch. For instance When outside of Lambda, the func passed to @task is run and we return the actual value. """ if (service in ASYNC_CLASSES) and (lambda_function_name): send_result = ASYNC_CLASSES[service](lambda_function_name=lambda_function_name, aws_region=aws_region, capture_response=capture_response).send(task_path, args, kwargs) return send_result else: return func(*args, **kwargs) update_wrapper(_run_async, func) _run_async.service = service _run_async.sync = func return _run_async return func_wrapper(func) if func else func_wrapper def task_sns(func): """ SNS-based task dispatcher. Functions the same way as task() """ return task(func, service='sns') ## # Utility Functions ## def import_and_get_task(task_path): """ Given a modular path to a function, import that module and return the function. """ module, function = task_path.rsplit('.', 1) app_module = importlib.import_module(module) app_function = getattr(app_module, function) return app_function def get_func_task_path(func): """ Format the modular task path for a function via inspection. """ module_path = inspect.getmodule(func).__name__ task_path = '{module_path}.{func_name}'.format( module_path=module_path, func_name=func.__name__ ) return task_path def get_async_response(response_id): response = DYNAMODB_CLIENT.get_item( TableName=ASYNC_RESPONSE_TABLE, Key={'id': {'S': str(response_id)}} ) if 'Item' not in response: return None return { 'status': response['Item']['async_status']['S'], 'response': json.loads(response['Item']['async_response']['S']), }
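# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module), using only names defined above (`task`, `run`,
# `get_async_response`); `add` is a hypothetical function:
#
#   @task(capture_response=True)
#   def add(x, y):
#       return x + y
#
#   result = add(1, 2)      # inside Lambda: dispatched asynchronously, and
#                           # `result` is a LambdaAsyncResponse; outside
#                           # Lambda: runs synchronously and returns 3.
#
#   # The undecorated path is always available:
#   #   add.sync(1, 2)
#   # and the non-decorator form is equivalent:
#   #   run(add, args=[1, 2], kwargs={})
#
#   # If capture_response was used and ASYNC_RESPONSE_TABLE is configured,
#   # the stored result can later be fetched with:
#   #   get_async_response(result.response_id)
# ---------------------------------------------------------------------------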
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Core Keras layers. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function import copy import types as python_types import numpy as np from tensorflow.python.framework import tensor_shape from tensorflow.python.keras._impl.keras import activations from tensorflow.python.keras._impl.keras import backend as K from tensorflow.python.keras._impl.keras import constraints from tensorflow.python.keras._impl.keras import initializers from tensorflow.python.keras._impl.keras import regularizers from tensorflow.python.keras._impl.keras.engine import InputSpec from tensorflow.python.keras._impl.keras.engine import Layer from tensorflow.python.keras._impl.keras.utils.generic_utils import deserialize_keras_object from tensorflow.python.keras._impl.keras.utils.generic_utils import func_dump from tensorflow.python.keras._impl.keras.utils.generic_utils import func_load from tensorflow.python.keras._impl.keras.utils.generic_utils import has_arg from tensorflow.python.layers import core as tf_core_layers class Masking(Layer): """Masks a sequence by using a mask value to skip timesteps. For each timestep in the input tensor (dimension #1 in the tensor), if all values in the input tensor at that timestep are equal to `mask_value`, then the timestep will be masked (skipped) in all downstream layers (as long as they support masking). If any downstream layer does not support masking yet receives such an input mask, an exception will be raised. Example: Consider a Numpy data array `x` of shape `(samples, timesteps, features)`, to be fed to a LSTM layer. You want to mask timestep #3 and #5 because you lack data for these timesteps. You can: - set `x[:, 3, :] = 0.` and `x[:, 5, :] = 0.` - insert a `Masking` layer with `mask_value=0.` before the LSTM layer: ```python model = Sequential() model.add(Masking(mask_value=0., input_shape=(timesteps, features))) model.add(LSTM(32)) ``` """ def __init__(self, mask_value=0., **kwargs): super(Masking, self).__init__(**kwargs) self.supports_masking = True self.mask_value = mask_value def compute_mask(self, inputs, mask=None): return K.any(K.not_equal(inputs, self.mask_value), axis=-1) def call(self, inputs): boolean_mask = K.any( K.not_equal(inputs, self.mask_value), axis=-1, keepdims=True) return inputs * K.cast(boolean_mask, inputs.dtype) def get_config(self): config = {'mask_value': self.mask_value} base_config = super(Masking, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Dropout(tf_core_layers.Dropout, Layer): """Applies Dropout to the input. Dropout consists in randomly setting a fraction `rate` of input units to 0 at each update during training time, which helps prevent overfitting. Arguments: rate: float between 0 and 1. Fraction of the input units to drop. 
noise_shape: 1D integer tensor representing the shape of the binary dropout mask that will be multiplied with the input. For instance, if your inputs have shape `(batch_size, timesteps, features)` and you want the dropout mask to be the same for all timesteps, you can use `noise_shape=(batch_size, 1, features)`. seed: A Python integer to use as random seed. """ def __init__(self, rate, noise_shape=None, seed=None, **kwargs): self.supports_masking = True # Inheritance call order: # 1) tf.layers.Dropout, 2) keras.layers.Layer, 3) tf.layers.Layer super(Dropout, self).__init__(rate=rate, noise_shape=noise_shape, seed=seed, **kwargs) def call(self, inputs, training=None): if training is None: training = K.learning_phase() output = super(Dropout, self).call(inputs, training=training) if training is K.learning_phase(): output._uses_learning_phase = True # pylint: disable=protected-access return output def get_config(self): config = {'rate': self.rate} base_config = super(Dropout, self).get_config() return dict(list(base_config.items()) + list(config.items())) class SpatialDropout1D(Dropout): """Spatial 1D version of Dropout. This version performs the same function as Dropout, however it drops entire 1D feature maps instead of individual elements. If adjacent frames within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout1D will help promote independence between feature maps and should be used instead. Arguments: rate: float between 0 and 1. Fraction of the input units to drop. Input shape: 3D tensor with shape: `(samples, timesteps, channels)` Output shape: Same as input References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, **kwargs): super(SpatialDropout1D, self).__init__(rate, **kwargs) self.input_spec = InputSpec(ndim=3) def _get_noise_shape(self, inputs): input_shape = K.shape(inputs) noise_shape = (input_shape[0], 1, input_shape[2]) return noise_shape class SpatialDropout2D(Dropout): """Spatial 2D version of Dropout. This version performs the same function as Dropout, however it drops entire 2D feature maps instead of individual elements. If adjacent pixels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout2D will help promote independence between feature maps and should be used instead. Arguments: rate: float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode is it at index 3. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 4D tensor with shape: `(samples, channels, rows, cols)` if data_format='channels_first' or 4D tensor with shape: `(samples, rows, cols, channels)` if data_format='channels_last'. 
Output shape: Same as input References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, data_format=None, **kwargs): super(SpatialDropout2D, self).__init__(rate, **kwargs) if data_format is None: data_format = K.image_data_format() if data_format not in {'channels_last', 'channels_first'}: raise ValueError('data_format must be in ' '{"channels_last", "channels_first"}') self.data_format = data_format self.input_spec = InputSpec(ndim=4) def _get_noise_shape(self, inputs): input_shape = K.shape(inputs) if self.data_format == 'channels_first': return (input_shape[0], input_shape[1], 1, 1) elif self.data_format == 'channels_last': return (input_shape[0], 1, 1, input_shape[3]) class SpatialDropout3D(Dropout): """Spatial 3D version of Dropout. This version performs the same function as Dropout, however it drops entire 3D feature maps instead of individual elements. If adjacent voxels within feature maps are strongly correlated (as is normally the case in early convolution layers) then regular dropout will not regularize the activations and will otherwise just result in an effective learning rate decrease. In this case, SpatialDropout3D will help promote independence between feature maps and should be used instead. Arguments: rate: float between 0 and 1. Fraction of the input units to drop. data_format: 'channels_first' or 'channels_last'. In 'channels_first' mode, the channels dimension (the depth) is at index 1, in 'channels_last' mode is it at index 4. It defaults to the `image_data_format` value found in your Keras config file at `~/.keras/keras.json`. If you never set it, then it will be "channels_last". Input shape: 5D tensor with shape: `(samples, channels, dim1, dim2, dim3)` if data_format='channels_first' or 5D tensor with shape: `(samples, dim1, dim2, dim3, channels)` if data_format='channels_last'. Output shape: Same as input References: - [Efficient Object Localization Using Convolutional Networks](https://arxiv.org/abs/1411.4280) """ def __init__(self, rate, data_format=None, **kwargs): super(SpatialDropout3D, self).__init__(rate, **kwargs) if data_format is None: data_format = K.image_data_format() if data_format not in {'channels_last', 'channels_first'}: raise ValueError('data_format must be in ' '{"channels_last", "channels_first"}') self.data_format = data_format self.input_spec = InputSpec(ndim=5) def _get_noise_shape(self, inputs): input_shape = K.shape(inputs) if self.data_format == 'channels_first': return (input_shape[0], input_shape[1], 1, 1, 1) elif self.data_format == 'channels_last': return (input_shape[0], 1, 1, 1, input_shape[4]) class Activation(Layer): """Applies an activation function to an output. Arguments: activation: name of activation function to use or alternatively, a Theano or TensorFlow operation. Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, activation, **kwargs): super(Activation, self).__init__(**kwargs) self.supports_masking = True self.activation = activations.get(activation) def call(self, inputs): return self.activation(inputs) def get_config(self): config = {'activation': activations.serialize(self.activation)} base_config = super(Activation, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Reshape(Layer): """Reshapes an output to a certain shape. 
Arguments: target_shape: target shape. Tuple of integers, does not include the samples dimension (batch size). Input shape: Arbitrary, although all dimensions in the input shaped must be fixed. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: `(batch_size,) + target_shape` Example: ```python # as first layer in a Sequential model model = Sequential() model.add(Reshape((3, 4), input_shape=(12,))) # now: model.output_shape == (None, 3, 4) # note: `None` is the batch dimension # as intermediate layer in a Sequential model model.add(Reshape((6, 2))) # now: model.output_shape == (None, 6, 2) # also supports shape inference using `-1` as dimension model.add(Reshape((-1, 2, 2))) # now: model.output_shape == (None, 3, 2, 2) ``` """ def __init__(self, target_shape, **kwargs): super(Reshape, self).__init__(**kwargs) self.target_shape = tuple(target_shape) def _fix_unknown_dimension(self, input_shape, output_shape): """Find and replace a missing dimension in an output shape. This is a near direct port of the internal Numpy function `_fix_unknown_dimension` in `numpy/core/src/multiarray/shape.c` Arguments: input_shape: shape of array being reshaped output_shape: desired shape of the array with at most a single -1 which indicates a dimension that should be derived from the input shape. Returns: The new output shape with a -1 replaced with its computed value. Raises a ValueError if the total array size of the output_shape is different then the input_shape, or more than one unknown dimension is specified. Raises: ValueError: in case of invalid values for `input_shape` or `input_shape`. """ output_shape = list(output_shape) msg = 'total size of new array must be unchanged' known, unknown = 1, None for index, dim in enumerate(output_shape): if dim < 0: if unknown is None: unknown = index else: raise ValueError('Can only specify one unknown dimension.') else: known *= dim original = np.prod(input_shape, dtype=int) if unknown is not None: if known == 0 or original % known != 0: raise ValueError(msg) output_shape[unknown] = original // known elif original != known: raise ValueError(msg) return output_shape def _compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape = [input_shape[0]] output_shape += self._fix_unknown_dimension(input_shape[1:], self.target_shape) return tensor_shape.TensorShape(output_shape) def call(self, inputs): # In case the target shape is not fully defined, # we need access to the shape of x. target_shape = self.target_shape if -1 in target_shape: # target shape not fully defined target_shape = self._compute_output_shape(inputs.get_shape()) target_shape = target_shape.as_list()[1:] return K.reshape(inputs, (-1,) + tuple(target_shape)) def get_config(self): config = {'target_shape': self.target_shape} base_config = super(Reshape, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Permute(Layer): """Permutes the dimensions of the input according to a given pattern. Useful for e.g. connecting RNNs and convnets together. Example: ```python model = Sequential() model.add(Permute((2, 1), input_shape=(10, 64))) # now: model.output_shape == (None, 64, 10) # note: `None` is the batch dimension ``` Arguments: dims: Tuple of integers. Permutation pattern, does not include the samples dimension. Indexing starts at 1. For instance, `(2, 1)` permutes the first and second dimension of the input. 
Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same as the input shape, but with the dimensions re-ordered according to the specified pattern. """ def __init__(self, dims, **kwargs): super(Permute, self).__init__(**kwargs) self.dims = tuple(dims) self.input_spec = InputSpec(ndim=len(self.dims) + 1) def _compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() output_shape = copy.copy(input_shape) for i, dim in enumerate(self.dims): target_dim = input_shape[dim] output_shape[i + 1] = target_dim return tensor_shape.TensorShape(output_shape) def call(self, inputs): return K.permute_dimensions(inputs, (0,) + self.dims) def get_config(self): config = {'dims': self.dims} base_config = super(Permute, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Flatten(tf_core_layers.Flatten, Layer): """Flattens the input. Does not affect the batch size. Example: ```python model = Sequential() model.add(Convolution2D(64, 3, 3, border_mode='same', input_shape=(3, 32, 32))) # now: model.output_shape == (None, 64, 32, 32) model.add(Flatten()) # now: model.output_shape == (None, 65536) ``` """ pass class RepeatVector(Layer): """Repeats the input n times. Example: ```python model = Sequential() model.add(Dense(32, input_dim=32)) # now: model.output_shape == (None, 32) # note: `None` is the batch dimension model.add(RepeatVector(3)) # now: model.output_shape == (None, 3, 32) ``` Arguments: n: integer, repetition factor. Input shape: 2D tensor of shape `(num_samples, features)`. Output shape: 3D tensor of shape `(num_samples, n, features)`. """ def __init__(self, n, **kwargs): super(RepeatVector, self).__init__(**kwargs) self.n = n self.input_spec = InputSpec(ndim=2) def _compute_output_shape(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() return tensor_shape.TensorShape([input_shape[0], self.n, input_shape[1]]) def call(self, inputs): return K.repeat(inputs, self.n) def get_config(self): config = {'n': self.n} base_config = super(RepeatVector, self).get_config() return dict(list(base_config.items()) + list(config.items())) class Lambda(Layer): """Wraps arbitrary expression as a `Layer` object. Examples: ```python # add a x -> x^2 layer model.add(Lambda(lambda x: x ** 2)) ``` ```python # add a layer that returns the concatenation # of the positive part of the input and # the opposite of the negative part def antirectifier(x): x -= K.mean(x, axis=1, keepdims=True) x = K.l2_normalize(x, axis=1) pos = K.relu(x) neg = K.relu(-x) return K.concatenate([pos, neg], axis=1) model.add(Lambda(antirectifier)) ``` Arguments: function: The function to be evaluated. Takes input tensor as first argument. arguments: optional dictionary of keyword arguments to be passed to the function. Input shape: Arbitrary. Use the keyword argument input_shape (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Specified by `output_shape` argument (or auto-inferred when using TensorFlow). 
""" def __init__(self, function, mask=None, arguments=None, **kwargs): super(Lambda, self).__init__(**kwargs) self.function = function self.arguments = arguments if arguments else {} if mask is not None: self.supports_masking = True self.mask = mask def call(self, inputs, mask=None): arguments = self.arguments if has_arg(self.function, 'mask'): arguments['mask'] = mask return self.function(inputs, **arguments) def compute_mask(self, inputs, mask=None): if callable(self.mask): return self.mask(inputs, mask) return self.mask def get_config(self): if isinstance(self.function, python_types.LambdaType): function = func_dump(self.function) function_type = 'lambda' else: function = self.function.__name__ function_type = 'function' config = { 'function': function, 'function_type': function_type, 'arguments': self.arguments } base_config = super(Lambda, self).get_config() return dict(list(base_config.items()) + list(config.items())) @classmethod def from_config(cls, config, custom_objects=None): globs = globals() if custom_objects: globs = dict(list(globs.items()) + list(custom_objects.items())) function_type = config.pop('function_type') if function_type == 'function': # Simple lookup in custom objects function = deserialize_keras_object( config['function'], custom_objects=custom_objects, printable_module_name='function in Lambda layer') elif function_type == 'lambda': # Unsafe deserialization from bytecode function = func_load(config['function'], globs=globs) else: raise TypeError('Unknown function type:', function_type) # If arguments were numpy array, they have been saved as # list. We need to recover the ndarray if 'arguments' in config: for key in config['arguments']: if isinstance(config['arguments'][key], dict): arg_dict = config['arguments'][key] if 'type' in arg_dict and arg_dict['type'] == 'ndarray': # Overwrite the argument with its numpy translation config['arguments'][key] = np.array(arg_dict['value']) config['function'] = function return cls(**config) class Dense(tf_core_layers.Dense, Layer): """Just your regular densely-connected NN layer. `Dense` implements the operation: `output = activation(dot(input, kernel) + bias)` where `activation` is the element-wise activation function passed as the `activation` argument, `kernel` is a weights matrix created by the layer, and `bias` is a bias vector created by the layer (only applicable if `use_bias` is `True`). Note: if the input to the layer has a rank greater than 2, then it is flattened prior to the initial dot product with `kernel`. Example: ```python # as first layer in a sequential model: model = Sequential() model.add(Dense(32, input_shape=(16,))) # now the model will take as input arrays of shape (*, 16) # and output arrays of shape (*, 32) # after the first layer, you don't need to specify # the size of the input anymore: model.add(Dense(32)) ``` Arguments: units: Positive integer, dimensionality of the output space. activation: Activation function to use. If you don't specify anything, no activation is applied (ie. "linear" activation: `a(x) = x`). use_bias: Boolean, whether the layer uses a bias vector. kernel_initializer: Initializer for the `kernel` weights matrix. bias_initializer: Initializer for the bias vector. kernel_regularizer: Regularizer function applied to the `kernel` weights matrix. bias_regularizer: Regularizer function applied to the bias vector. activity_regularizer: Regularizer function applied to the output of the layer (its "activation").. 
kernel_constraint: Constraint function applied to the `kernel` weights matrix. bias_constraint: Constraint function applied to the bias vector. Input shape: nD tensor with shape: `(batch_size, ..., input_dim)`. The most common situation would be a 2D input with shape `(batch_size, input_dim)`. Output shape: nD tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D input with shape `(batch_size, input_dim)`, the output would have shape `(batch_size, units)`. """ def __init__(self, units, activation=None, use_bias=True, kernel_initializer='glorot_uniform', bias_initializer='zeros', kernel_regularizer=None, bias_regularizer=None, activity_regularizer=None, kernel_constraint=None, bias_constraint=None, **kwargs): if 'input_shape' not in kwargs and 'input_dim' in kwargs: kwargs['input_shape'] = (kwargs.pop('input_dim'),) # Inheritance call order: # 1) tf.layers.Dense, 2) keras.layers.Layer, 3) tf.layers.Layer super(Dense, self).__init__( units, activation=activations.get(activation), use_bias=use_bias, kernel_initializer=initializers.get(kernel_initializer), bias_initializer=initializers.get(bias_initializer), kernel_regularizer=regularizers.get(kernel_regularizer), bias_regularizer=regularizers.get(bias_regularizer), activity_regularizer=regularizers.get(activity_regularizer), kernel_constraint=constraints.get(kernel_constraint), bias_constraint=constraints.get(bias_constraint), **kwargs) self.supports_masking = True def get_config(self): config = { 'units': self.units, 'activation': activations.serialize(self.activation), 'use_bias': self.use_bias, 'kernel_initializer': initializers.serialize(self.kernel_initializer), 'bias_initializer': initializers.serialize(self.bias_initializer), 'kernel_regularizer': regularizers.serialize(self.kernel_regularizer), 'bias_regularizer': regularizers.serialize(self.bias_regularizer), 'activity_regularizer': regularizers.serialize(self.activity_regularizer), 'kernel_constraint': constraints.serialize(self.kernel_constraint), 'bias_constraint': constraints.serialize(self.bias_constraint) } base_config = super(Dense, self).get_config() return dict(list(base_config.items()) + list(config.items())) class ActivityRegularization(Layer): """Layer that applies an update to the cost function based input activity. Arguments: l1: L1 regularization factor (positive float). l2: L2 regularization factor (positive float). Input shape: Arbitrary. Use the keyword argument `input_shape` (tuple of integers, does not include the samples axis) when using this layer as the first layer in a model. Output shape: Same shape as input. """ def __init__(self, l1=0., l2=0., **kwargs): super(ActivityRegularization, self).__init__(**kwargs) self.supports_masking = True self.l1 = l1 self.l2 = l2 self.activity_regularizer = regularizers.L1L2(l1=l1, l2=l2) def get_config(self): config = {'l1': self.l1, 'l2': self.l2} base_config = super(ActivityRegularization, self).get_config() return dict(list(base_config.items()) + list(config.items()))
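# ---------------------------------------------------------------------------
# Illustrative usage sketch (added for clarity; not part of the original
# module). It assumes these layers are also re-exported through the public
# `tf.keras` API; `Sequential` is not defined or imported in this file, so
# the example is shown as a comment only:
#
#   model = tf.keras.models.Sequential([
#       tf.keras.layers.Dense(32, activation='relu', input_shape=(16,)),
#       tf.keras.layers.Dropout(0.5),
#       tf.keras.layers.Dense(10, activation='softmax'),
#   ])
# ---------------------------------------------------------------------------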
# Copyright 2022 The MT3 Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """TensorBoard summaries and utilities.""" from typing import Any, Mapping, Optional, Sequence, Tuple import librosa from mt3 import note_sequences from mt3 import spectrograms import note_seq from note_seq import midi_synth from note_seq import sequences_lib from note_seq.protobuf import music_pb2 import numpy as np import seqio _DEFAULT_AUDIO_SECONDS = 30.0 _DEFAULT_PIANOROLL_FRAMES_PER_SECOND = 15 # TODO(iansimon): pick a SoundFont; for some reason the default is all organ def _extract_example_audio( examples: Sequence[Mapping[str, Any]], sample_rate: float, num_seconds: float, audio_key: str = 'raw_inputs' ) -> np.ndarray: """Extract audio from examples. Args: examples: List of examples containing raw audio. sample_rate: Number of samples per second. num_seconds: Number of seconds of audio to include. audio_key: Dictionary key for the raw audio. Returns: An n-by-num_samples numpy array of samples. """ n = len(examples) num_samples = round(num_seconds * sample_rate) all_samples = np.zeros([n, num_samples]) for i, ex in enumerate(examples): samples = ex[audio_key][:num_samples] all_samples[i, :len(samples)] = samples return all_samples def _example_to_note_sequence( example: Mapping[str, Sequence[float]], ns_feature_name: str, note_onset_feature_name: str, note_offset_feature_name: str, note_frequency_feature_name: str, note_confidence_feature_name: str, num_seconds: float ) -> music_pb2.NoteSequence: """Extract NoteSequence from example.""" if ns_feature_name: ns = example[ns_feature_name] else: onset_times = np.array(example[note_onset_feature_name]) pitches = librosa.hz_to_midi( example[note_frequency_feature_name]).round().astype(int) assert len(onset_times) == len(pitches) if note_offset_feature_name or note_confidence_feature_name: offset_times = ( example[note_offset_feature_name] if note_offset_feature_name else onset_times + note_sequences.DEFAULT_NOTE_DURATION ) assert len(onset_times) == len(offset_times) confidences = (np.array(example[note_confidence_feature_name]) if note_confidence_feature_name else None) velocities = np.ceil( note_seq.MAX_MIDI_VELOCITY * confidences if confidences is not None else note_sequences.DEFAULT_VELOCITY * np.ones_like(onset_times) ).astype(int) assert len(onset_times) == len(velocities) ns = note_sequences.note_arrays_to_note_sequence( onset_times=onset_times, offset_times=offset_times, pitches=pitches, velocities=velocities) else: ns = note_sequences.note_arrays_to_note_sequence( onset_times=onset_times, pitches=pitches) return sequences_lib.trim_note_sequence(ns, 0, num_seconds) def _synthesize_example_notes( examples: Sequence[Mapping[str, Sequence[float]]], ns_feature_name: str, note_onset_feature_name: str, note_offset_feature_name: str, note_frequency_feature_name: str, note_confidence_feature_name: str, sample_rate: float, num_seconds: float, ) -> np.ndarray: """Synthesize example notes to audio. 
Args: examples: List of example dictionaries, containing either serialized NoteSequence protos or note onset times and pitches. ns_feature_name: Name of serialized NoteSequence feature. note_onset_feature_name: Name of note onset times feature. note_offset_feature_name: Name of note offset times feature. note_frequency_feature_name: Name of note frequencies feature. note_confidence_feature_name: Name of note confidences (velocities) feature. sample_rate: Sample rate at which to synthesize. num_seconds: Number of seconds to synthesize for each example. Returns: An n-by-num_samples numpy array of samples. """ if (ns_feature_name is not None) == (note_onset_feature_name is not None): raise ValueError( 'must specify exactly one of NoteSequence feature and onset feature') n = len(examples) num_samples = round(num_seconds * sample_rate) all_samples = np.zeros([n, num_samples]) for i, ex in enumerate(examples): ns = _example_to_note_sequence( ex, ns_feature_name=ns_feature_name, note_onset_feature_name=note_onset_feature_name, note_offset_feature_name=note_offset_feature_name, note_frequency_feature_name=note_frequency_feature_name, note_confidence_feature_name=note_confidence_feature_name, num_seconds=num_seconds) fluidsynth = midi_synth.fluidsynth samples = fluidsynth(ns, sample_rate=sample_rate) if len(samples) > num_samples: samples = samples[:num_samples] all_samples[i, :len(samples)] = samples return all_samples def _examples_to_pianorolls( targets: Sequence[Mapping[str, Sequence[float]]], predictions: Sequence[Mapping[str, Sequence[float]]], ns_feature_suffix: str, note_onset_feature_suffix: str, note_offset_feature_suffix: str, note_frequency_feature_suffix: str, note_confidence_feature_suffix: str, track_specs: Optional[Sequence[note_sequences.TrackSpec]], num_seconds: float, frames_per_second: float ) -> Tuple[np.ndarray, np.ndarray]: """Generate pianoroll images from example notes. Args: targets: List of target dictionaries, containing either serialized NoteSequence protos or note onset times and pitches. predictions: List of prediction dictionaries, containing either serialized NoteSequence protos or note onset times and pitches. ns_feature_suffix: Suffix of serialized NoteSequence feature. note_onset_feature_suffix: Suffix of note onset times feature. note_offset_feature_suffix: Suffix of note offset times feature. note_frequency_feature_suffix: Suffix of note frequencies feature. note_confidence_feature_suffix: Suffix of note confidences (velocities) feature. track_specs: Optional list of TrackSpec objects to indicate a set of tracks into which each NoteSequence should be split. Tracks will be stacked vertically in the pianorolls num_seconds: Number of seconds to show for each example. frames_per_second: Number of pianoroll frames per second. Returns: onset_pianorolls: An n-by-num_pitches-by-num_frames-by-4 numpy array of pianoroll images showing only onsets. full_pianorolls: An n-by-num_pitches-by-num_frames-by-4 numpy array of pianoroll images. 
""" if (ns_feature_suffix is not None) == (note_onset_feature_suffix is not None): raise ValueError( 'must specify exactly one of NoteSequence feature and onset feature') def ex_to_ns(example, prefix): return _example_to_note_sequence( example=example, ns_feature_name=(prefix + ns_feature_suffix if ns_feature_suffix else None), note_onset_feature_name=(prefix + note_onset_feature_suffix if note_onset_feature_suffix else None), note_offset_feature_name=(prefix + note_offset_feature_suffix if note_offset_feature_suffix else None), note_frequency_feature_name=( prefix + note_frequency_feature_suffix if note_frequency_feature_suffix else None), note_confidence_feature_name=( prefix + note_confidence_feature_suffix if note_confidence_feature_suffix else None), num_seconds=num_seconds) n = len(targets) num_pitches = note_seq.MAX_MIDI_PITCH - note_seq.MIN_MIDI_PITCH + 1 num_frames = round(num_seconds * frames_per_second) num_tracks = len(track_specs) if track_specs else 1 pianoroll_height = num_tracks * num_pitches + (num_tracks - 1) onset_images = np.zeros([n, pianoroll_height, num_frames, 3]) full_images = np.zeros([n, pianoroll_height, num_frames, 3]) for i, (target, pred) in enumerate(zip(targets, predictions)): target_ns, pred_ns = [ ex_to_ns(ex, prefix) for (ex, prefix) in [(target, 'ref_'), (pred, 'est_')] ] # Show lines at frame boundaries. To ensure that these lines are drawn with # the same downsampling and frame selection logic as the real NoteSequences, # use this hack to draw the lines with a NoteSequence that contains notes # across all pitches at all frame start times. start_times_ns = note_seq.NoteSequence() start_times_ns.CopyFrom(target_ns) del start_times_ns.notes[:] for start_time in pred['start_times']: if start_time < target_ns.total_time: for pitch in range( note_seq.MIN_MIDI_PITCH, note_seq.MAX_MIDI_PITCH + 1): start_times_ns.notes.add( pitch=pitch, velocity=100, start_time=start_time, end_time=start_time + (1 / frames_per_second)) start_time_roll = sequences_lib.sequence_to_pianoroll( start_times_ns, frames_per_second=frames_per_second, min_pitch=note_seq.MIN_MIDI_PITCH, max_pitch=note_seq.MAX_MIDI_PITCH, onset_mode='length_ms') num_start_time_frames = min(len(start_time_roll.onsets), num_frames) if track_specs is not None: target_tracks = [note_sequences.extract_track(target_ns, spec.program, spec.is_drum) for spec in track_specs] pred_tracks = [note_sequences.extract_track(pred_ns, spec.program, spec.is_drum) for spec in track_specs] else: target_tracks = [target_ns] pred_tracks = [pred_ns] for j, (target_track, pred_track) in enumerate(zip(target_tracks[::-1], pred_tracks[::-1])): target_roll = sequences_lib.sequence_to_pianoroll( target_track, frames_per_second=frames_per_second, min_pitch=note_seq.MIN_MIDI_PITCH, max_pitch=note_seq.MAX_MIDI_PITCH, onset_mode='length_ms') pred_roll = sequences_lib.sequence_to_pianoroll( pred_track, frames_per_second=frames_per_second, min_pitch=note_seq.MIN_MIDI_PITCH, max_pitch=note_seq.MAX_MIDI_PITCH, onset_mode='length_ms') num_target_frames = min(len(target_roll.onsets), num_frames) num_pred_frames = min(len(pred_roll.onsets), num_frames) start_offset = j * (num_pitches + 1) end_offset = (j + 1) * (num_pitches + 1) - 1 # Onsets onset_images[ i, start_offset:end_offset, :num_start_time_frames, 0 ] = start_time_roll.onsets[:num_start_time_frames, :].T onset_images[ i, start_offset:end_offset, :num_target_frames, 1 ] = target_roll.onsets[:num_target_frames, :].T onset_images[ i, start_offset:end_offset, :num_pred_frames, 2 ] = 
pred_roll.onsets[:num_pred_frames, :].T # Full notes full_images[ i, start_offset:end_offset, :num_start_time_frames, 0 ] = start_time_roll.onsets[:num_start_time_frames, :].T full_images[ i, start_offset:end_offset, :num_target_frames, 1 ] = target_roll.active[:num_target_frames, :].T full_images[ i, start_offset:end_offset, :num_pred_frames, 2 ] = pred_roll.active[:num_pred_frames, :].T # Add separator between tracks. if j < num_tracks - 1: onset_images[i, end_offset, :, 0] = 1 full_images[i, end_offset, :, 0] = 1 return onset_images[:, ::-1, :, :], full_images[:, ::-1, :, :] def prettymidi_pianoroll( track_pianorolls: Mapping[str, Sequence[Tuple[np.ndarray, np.ndarray]]], fps: float, num_seconds=_DEFAULT_AUDIO_SECONDS ) -> Mapping[str, seqio.metrics.MetricValue]: """Create summary from given pianorolls.""" max_len = int(num_seconds * fps) summaries = {} for inst_name, all_prs in track_pianorolls.items(): est_prs, ref_prs = zip(*all_prs) bs = len(ref_prs) pianoroll_image_batch = np.zeros(shape=(bs, 128, max_len, 3)) for i in range(bs): ref_pr = ref_prs[i][:, :max_len] est_pr = est_prs[i][:, :max_len] pianoroll_image_batch[i, :, :est_pr.shape[1], 2] = est_pr pianoroll_image_batch[i, :, :ref_pr.shape[1], 1] = ref_pr if not inst_name: inst_name = 'all instruments' summaries[f'{inst_name} pretty_midi pianoroll'] = seqio.metrics.Image( image=pianoroll_image_batch, max_outputs=bs) return summaries def audio_summaries( targets: Sequence[Mapping[str, Sequence[float]]], predictions: Sequence[Mapping[str, Sequence[float]]], spectrogram_config: spectrograms.SpectrogramConfig, num_seconds: float = _DEFAULT_AUDIO_SECONDS ) -> Mapping[str, seqio.metrics.MetricValue]: """Compute audio summaries for a list of examples. Args: targets: List of targets, unused as we pass the input audio tokens via predictions. predictions: List of predictions, including input audio tokens. spectrogram_config: Spectrogram configuration. num_seconds: Number of seconds of audio to include in the summaries. Longer audio will be cropped (from the beginning), shorter audio will be padded with silence (at the end). Returns: A dictionary mapping "audio" to the audio summaries. """ del targets samples = _extract_example_audio( examples=predictions, sample_rate=spectrogram_config.sample_rate, num_seconds=num_seconds) return { 'audio': seqio.metrics.Audio( audiodata=samples[:, :, np.newaxis], sample_rate=spectrogram_config.sample_rate, max_outputs=samples.shape[0]) } def transcription_summaries( targets: Sequence[Mapping[str, Sequence[float]]], predictions: Sequence[Mapping[str, Sequence[float]]], spectrogram_config: spectrograms.SpectrogramConfig, ns_feature_suffix: Optional[str] = None, note_onset_feature_suffix: Optional[str] = None, note_offset_feature_suffix: Optional[str] = None, note_frequency_feature_suffix: Optional[str] = None, note_confidence_feature_suffix: Optional[str] = None, track_specs: Optional[Sequence[note_sequences.TrackSpec]] = None, num_seconds: float = _DEFAULT_AUDIO_SECONDS, pianoroll_frames_per_second: float = _DEFAULT_PIANOROLL_FRAMES_PER_SECOND, ) -> Mapping[str, seqio.metrics.MetricValue]: """Compute note transcription summaries for multiple examples. Args: targets: List of targets containing ground truth. predictions: List of predictions, including raw input audio. spectrogram_config: The spectrogram configuration. ns_feature_suffix: Suffix of serialized NoteSequence feature. note_onset_feature_suffix: Suffix of note onset times feature. note_offset_feature_suffix: Suffix of note offset times feature. 
note_frequency_feature_suffix: Suffix of note frequencies feature. note_confidence_feature_suffix: Suffix of note confidences (velocities) feature. track_specs: Optional list of TrackSpec objects to indicate a set of tracks into which each NoteSequence should be split. num_seconds: Number of seconds of audio to include in the summaries. Longer audio will be cropped (from the beginning), shorter audio will be padded with silence (at the end). pianoroll_frames_per_second: Temporal resolution of pianoroll images. Returns: A dictionary of input, ground truth, and transcription summaries. """ audio_samples = _extract_example_audio( examples=predictions, sample_rate=spectrogram_config.sample_rate, num_seconds=num_seconds) def synthesize(examples, prefix): return _synthesize_example_notes( examples=examples, ns_feature_name=(prefix + ns_feature_suffix if ns_feature_suffix else None), note_onset_feature_name=(prefix + note_onset_feature_suffix if note_onset_feature_suffix else None), note_offset_feature_name=(prefix + note_offset_feature_suffix if note_offset_feature_suffix else None), note_frequency_feature_name=( prefix + note_frequency_feature_suffix if note_frequency_feature_suffix else None), note_confidence_feature_name=( prefix + note_confidence_feature_suffix if note_confidence_feature_suffix else None), sample_rate=spectrogram_config.sample_rate, num_seconds=num_seconds) synthesized_predictions = synthesize(predictions, 'est_') onset_pianoroll_images, full_pianoroll_images = _examples_to_pianorolls( targets=targets, predictions=predictions, ns_feature_suffix=ns_feature_suffix, note_onset_feature_suffix=note_onset_feature_suffix, note_offset_feature_suffix=note_offset_feature_suffix, note_frequency_feature_suffix=note_frequency_feature_suffix, note_confidence_feature_suffix=note_confidence_feature_suffix, track_specs=track_specs, num_seconds=num_seconds, frames_per_second=pianoroll_frames_per_second) return { 'input_with_transcription': seqio.metrics.Audio( audiodata=np.stack([audio_samples, synthesized_predictions], axis=2), sample_rate=spectrogram_config.sample_rate, max_outputs=audio_samples.shape[0]), 'pianoroll': seqio.metrics.Image( image=full_pianoroll_images, max_outputs=full_pianoroll_images.shape[0]), 'onset_pianoroll': seqio.metrics.Image( image=onset_pianoroll_images, max_outputs=onset_pianoroll_images.shape[0]), }
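# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# module). It shows the input layout expected by prettymidi_pianoroll: a
# mapping from instrument name to a list of (est_pianoroll, ref_pianoroll)
# pairs, each of shape (128, num_frames). The toy data and the guard below
# are assumptions made only for this sketch.
if __name__ == '__main__':
    import numpy as np

    _fps = 62.5
    _toy_est = np.random.randint(0, 2, size=(128, 100)).astype(np.float32)
    _toy_ref = np.random.randint(0, 2, size=(128, 100)).astype(np.float32)
    _rolls = {'piano': [(_toy_est, _toy_ref)]}
    _summaries = prettymidi_pianoroll(_rolls, fps=_fps)
    # Each value is a seqio.metrics.Image whose batch has shape
    # (batch, 128, int(num_seconds * fps), 3); channel 1 holds the reference
    # roll and channel 2 the estimate, as assigned above.
    print(sorted(_summaries.keys()))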
#!/usr/bin/env python # # Use the raw transactions API to spend bitcoins received on particular addresses, # and send any change back to that same address. # # Example usage: # spendfrom.py # Lists available funds # spendfrom.py --from=ADDRESS --to=ADDRESS --amount=11.00 # # Assumes it will talk to a bitcoind or Bitcoin-Qt running # on localhost. # # Depends on jsonrpc # from decimal import * import getpass import math import os import os.path import platform import sys import time from jsonrpc import ServiceProxy, json BASE_FEE=Decimal("0.001") def check_json_precision(): """Make sure json library being used does not lose precision converting BTC values""" n = Decimal("20000000.00000003") satoshis = int(json.loads(json.dumps(float(n)))*1.0e8) if satoshis != 2000000000000003: raise RuntimeError("JSON encode/decode loses precision") def determine_db_dir(): """Return the default location of the bitcoin data directory""" if platform.system() == "Darwin": return os.path.expanduser("~/Library/Application Support/Bitcoin/") elif platform.system() == "Windows": return os.path.join(os.environ['APPDATA'], "Bitcoin") return os.path.expanduser("~/.bitcoin") def read_bitcoin_config(dbdir): """Read the bitcoin.conf file from dbdir, returns dictionary of settings""" from ConfigParser import SafeConfigParser class FakeSecHead(object): def __init__(self, fp): self.fp = fp self.sechead = '[all]\n' def readline(self): if self.sechead: try: return self.sechead finally: self.sechead = None else: s = self.fp.readline() if s.find('#') != -1: s = s[0:s.find('#')].strip() +"\n" return s config_parser = SafeConfigParser() config_parser.readfp(FakeSecHead(open(os.path.join(dbdir, "bitcoin.conf")))) return dict(config_parser.items("all")) def connect_JSON(config): """Connect to a bitcoin JSON-RPC server""" testnet = config.get('testnet', '0') testnet = (int(testnet) > 0) # 0/1 in config file, convert to True/False if not 'rpcport' in config: config['rpcport'] = 30416 if testnet else 3044 connect = "http://%s:%s@127.0.0.1:%s"%(config['rpcuser'], config['rpcpassword'], config['rpcport']) try: result = ServiceProxy(connect) # ServiceProxy is lazy-connect, so send an RPC command mostly to catch connection errors, # but also make sure the bitcoind we're talking to is/isn't testnet: if result.getmininginfo()['testnet'] != testnet: sys.stderr.write("RPC server at "+connect+" testnet setting mismatch\n") sys.exit(1) return result except: sys.stderr.write("Error connecting to RPC server at "+connect+"\n") sys.exit(1) def unlock_wallet(bitcoind): info = bitcoind.getinfo() if 'unlocked_until' not in info: return True # wallet is not encrypted t = int(info['unlocked_until']) if t <= time.time(): try: passphrase = getpass.getpass("Wallet is locked; enter passphrase: ") bitcoind.walletpassphrase(passphrase, 5) except: sys.stderr.write("Wrong passphrase\n") info = bitcoind.getinfo() return int(info['unlocked_until']) > time.time() def list_available(bitcoind): address_summary = dict() address_to_account = dict() for info in bitcoind.listreceivedbyaddress(0): address_to_account[info["address"]] = info["account"] unspent = bitcoind.listunspent(0) for output in unspent: # listunspent doesn't give addresses, so: rawtx = bitcoind.getrawtransaction(output['txid'], 1) vout = rawtx["vout"][output['vout']] pk = vout["scriptPubKey"] # This code only deals with ordinary pay-to-bitcoin-address # or pay-to-script-hash outputs right now; anything exotic is ignored. 
if pk["type"] != "pubkeyhash" and pk["type"] != "scripthash": continue address = pk["addresses"][0] if address in address_summary: address_summary[address]["total"] += vout["value"] address_summary[address]["outputs"].append(output) else: address_summary[address] = { "total" : vout["value"], "outputs" : [output], "account" : address_to_account.get(address, "") } return address_summary def select_coins(needed, inputs): # Feel free to improve this, this is good enough for my simple needs: outputs = [] have = Decimal("0.0") n = 0 while have < needed and n < len(inputs): outputs.append({ "txid":inputs[n]["txid"], "vout":inputs[n]["vout"]}) have += inputs[n]["amount"] n += 1 return (outputs, have-needed) def create_tx(bitcoind, fromaddresses, toaddress, amount, fee): all_coins = list_available(bitcoind) total_available = Decimal("0.0") needed = amount+fee potential_inputs = [] for addr in fromaddresses: if addr not in all_coins: continue potential_inputs.extend(all_coins[addr]["outputs"]) total_available += all_coins[addr]["total"] if total_available < needed: sys.stderr.write("Error, only %f BTC available, need %f\n"%(total_available, needed)); sys.exit(1) # # Note: # Python's json/jsonrpc modules have inconsistent support for Decimal numbers. # Instead of wrestling with getting json.dumps() (used by jsonrpc) to encode # Decimals, I'm casting amounts to float before sending them to bitcoind. # outputs = { toaddress : float(amount) } (inputs, change_amount) = select_coins(needed, potential_inputs) if change_amount > BASE_FEE: # don't bother with zero or tiny change change_address = fromaddresses[-1] if change_address in outputs: outputs[change_address] += float(change_amount) else: outputs[change_address] = float(change_amount) rawtx = bitcoind.createrawtransaction(inputs, outputs) signed_rawtx = bitcoind.signrawtransaction(rawtx) if not signed_rawtx["complete"]: sys.stderr.write("signrawtransaction failed\n") sys.exit(1) txdata = signed_rawtx["hex"] return txdata def compute_amount_in(bitcoind, txinfo): result = Decimal("0.0") for vin in txinfo['vin']: in_info = bitcoind.getrawtransaction(vin['txid'], 1) vout = in_info['vout'][vin['vout']] result = result + vout['value'] return result def compute_amount_out(txinfo): result = Decimal("0.0") for vout in txinfo['vout']: result = result + vout['value'] return result def sanity_test_fee(bitcoind, txdata_hex, max_fee): class FeeError(RuntimeError): pass try: txinfo = bitcoind.decoderawtransaction(txdata_hex) total_in = compute_amount_in(bitcoind, txinfo) total_out = compute_amount_out(txinfo) if total_in-total_out > max_fee: raise FeeError("Rejecting transaction, unreasonable fee of "+str(total_in-total_out)) tx_size = len(txdata_hex)/2 kb = tx_size/1000 # integer division rounds down if kb > 1 and fee < BASE_FEE: raise FeeError("Rejecting no-fee transaction, larger than 1000 bytes") if total_in < 0.01 and fee < BASE_FEE: raise FeeError("Rejecting no-fee, tiny-amount transaction") # Exercise for the reader: compute transaction priority, and # warn if this is a very-low-priority transaction except FeeError as err: sys.stderr.write((str(err)+"\n")) sys.exit(1) def main(): import optparse parser = optparse.OptionParser(usage="%prog [options]") parser.add_option("--from", dest="fromaddresses", default=None, help="addresses to get bitcoins from") parser.add_option("--to", dest="to", default=None, help="address to get send bitcoins to") parser.add_option("--amount", dest="amount", default=None, help="amount to send") parser.add_option("--fee", dest="fee", 
default="0.0", help="fee to include") parser.add_option("--datadir", dest="datadir", default=determine_db_dir(), help="location of bitcoin.conf file with RPC username/password (default: %default)") parser.add_option("--testnet", dest="testnet", default=False, action="store_true", help="Use the test network") parser.add_option("--dry_run", dest="dry_run", default=False, action="store_true", help="Don't broadcast the transaction, just create and print the transaction data") (options, args) = parser.parse_args() check_json_precision() config = read_bitcoin_config(options.datadir) if options.testnet: config['testnet'] = True bitcoind = connect_JSON(config) if options.amount is None: address_summary = list_available(bitcoind) for address,info in address_summary.iteritems(): n_transactions = len(info['outputs']) if n_transactions > 1: print("%s %.8f %s (%d transactions)"%(address, info['total'], info['account'], n_transactions)) else: print("%s %.8f %s"%(address, info['total'], info['account'])) else: fee = Decimal(options.fee) amount = Decimal(options.amount) while unlock_wallet(bitcoind) == False: pass # Keep asking for passphrase until they get it right txdata = create_tx(bitcoind, options.fromaddresses.split(","), options.to, amount, fee) sanity_test_fee(bitcoind, txdata, amount*Decimal("0.01")) if options.dry_run: print(txdata) else: txid = bitcoind.sendrawtransaction(txdata) print(txid) if __name__ == '__main__': main()
# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. from oslo_log import log as logging from oslo_serialization import jsonutils from oslo_utils import encodeutils import six import webob.exc from wsme.rest import json from glance.api import policy from glance.api.v2.model.metadef_resource_type import ResourceType from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociation from glance.api.v2.model.metadef_resource_type import ResourceTypeAssociations from glance.api.v2.model.metadef_resource_type import ResourceTypes from glance.common import exception from glance.common import wsgi import glance.db import glance.gateway from glance.i18n import _ import glance.notifier import glance.schema LOG = logging.getLogger(__name__) class ResourceTypeController(object): def __init__(self, db_api=None, policy_enforcer=None, notifier=None): self.db_api = db_api or glance.db.get_api() self.policy = policy_enforcer or policy.Enforcer() self.notifier = notifier or glance.notifier.Notifier() self.gateway = glance.gateway.Gateway(db_api=self.db_api, notifier=self.notifier, policy_enforcer=self.policy) def index(self, req): try: filters = {'namespace': None} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceType.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypes() resource_types.resource_types = resource_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "index") raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError(e) return resource_types def show(self, req, namespace): try: filters = {'namespace': namespace} rs_type_repo = self.gateway.get_metadef_resource_type_repo( req.context) db_resource_type_list = rs_type_repo.list(filters=filters) resource_type_list = [ResourceTypeAssociation.to_wsme_model( resource_type) for resource_type in db_resource_type_list] resource_types = ResourceTypeAssociations() resource_types.resource_type_associations = resource_type_list except exception.Forbidden as e: LOG.debug("User not permitted to retrieve metadata resource types " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError(e) return resource_types def create(self, req, resource_type, namespace): rs_type_factory = self.gateway.get_metadef_resource_type_factory( req.context) rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: new_resource_type = rs_type_factory.new_resource_type( namespace=namespace, 
**resource_type.to_dict()) rs_type_repo.add(new_resource_type) except exception.Forbidden as e: LOG.debug("User not permitted to create metadata resource type " "within '%s' namespace", namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: raise webob.exc.HTTPNotFound(explanation=e.msg) except exception.Duplicate as e: raise webob.exc.HTTPConflict(explanation=e.msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() return ResourceTypeAssociation.to_wsme_model(new_resource_type) def delete(self, req, namespace, resource_type): rs_type_repo = self.gateway.get_metadef_resource_type_repo(req.context) try: filters = {} found = False filters['namespace'] = namespace db_resource_type_list = rs_type_repo.list(filters=filters) for db_resource_type in db_resource_type_list: if db_resource_type.name == resource_type: db_resource_type.delete() rs_type_repo.remove(db_resource_type) found = True if not found: raise exception.NotFound() except exception.Forbidden as e: LOG.debug("User not permitted to delete metadata resource type " "'%s' within '%s' namespace", resource_type, namespace) raise webob.exc.HTTPForbidden(explanation=e.msg) except exception.NotFound as e: msg = (_("Failed to find resource type %(resourcetype)s to " "delete") % {'resourcetype': resource_type}) LOG.error(msg) raise webob.exc.HTTPNotFound(explanation=msg) except Exception as e: LOG.error(encodeutils.exception_to_unicode(e)) raise webob.exc.HTTPInternalServerError() class RequestDeserializer(wsgi.JSONRequestDeserializer): _disallowed_properties = ['created_at', 'updated_at'] def __init__(self, schema=None): super(RequestDeserializer, self).__init__() self.schema = schema or get_schema() def _get_request_body(self, request): output = super(RequestDeserializer, self).default(request) if 'body' not in output: msg = _('Body expected in request.') raise webob.exc.HTTPBadRequest(explanation=msg) return output['body'] @classmethod def _check_allowed(cls, image): for key in cls._disallowed_properties: if key in image: msg = _("Attribute '%s' is read-only.") % key raise webob.exc.HTTPForbidden(explanation=msg) def create(self, request): body = self._get_request_body(request) self._check_allowed(body) try: self.schema.validate(body) except exception.InvalidObject as e: raise webob.exc.HTTPBadRequest(explanation=e.msg) resource_type = json.fromjson(ResourceTypeAssociation, body) return dict(resource_type=resource_type) class ResponseSerializer(wsgi.JSONResponseSerializer): def __init__(self, schema=None): super(ResponseSerializer, self).__init__() self.schema = schema def show(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociations, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def index(self, response, result): resource_type_json = json.tojson(ResourceTypes, result) body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def create(self, response, result): resource_type_json = json.tojson(ResourceTypeAssociation, result) response.status_int = 201 body = jsonutils.dumps(resource_type_json, ensure_ascii=False) response.unicode_body = six.text_type(body) response.content_type = 'application/json' def delete(self, response, result): response.status_int = 204 def _get_base_properties(): return { 'name': { 'type': 
'string', 'description': _('Resource type names should be aligned with Heat ' 'resource types whenever possible: ' 'http://docs.openstack.org/developer/heat/' 'template_guide/openstack.html'), 'maxLength': 80, }, 'prefix': { 'type': 'string', 'description': _('Specifies the prefix to use for the given ' 'resource type. Any properties in the namespace ' 'should be prefixed with this prefix when being ' 'applied to the specified resource type. Must ' 'include prefix separator (e.g. a colon :).'), 'maxLength': 80, }, 'properties_target': { 'type': 'string', 'description': _('Some resource types allow more than one key / ' 'value pair per instance. For example, Cinder ' 'allows user and image metadata on volumes. Only ' 'the image properties metadata is evaluated by ' 'Nova (scheduling or drivers). This property ' 'allows a namespace target to remove the ' 'ambiguity.'), 'maxLength': 80, }, "created_at": { "type": "string", "readOnly": True, "description": _("Date and time of resource type association"), "format": "date-time" }, "updated_at": { "type": "string", "readOnly": True, "description": _("Date and time of the last resource type " "association modification"), "format": "date-time" } } def get_schema(): properties = _get_base_properties() mandatory_attrs = ResourceTypeAssociation.get_mandatory_attrs() schema = glance.schema.Schema( 'resource_type_association', properties, required=mandatory_attrs, ) return schema def get_collection_schema(): resource_type_schema = get_schema() return glance.schema.CollectionSchema('resource_type_associations', resource_type_schema) def create_resource(): """ResourceTypeAssociation resource factory method""" schema = get_schema() deserializer = RequestDeserializer(schema) serializer = ResponseSerializer(schema) controller = ResourceTypeController() return wsgi.Resource(controller, deserializer, serializer)
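# ---------------------------------------------------------------------------
# Hedged illustration (added; not part of the original module). A create()
# request body is validated against the schema built from
# _get_base_properties() above; a minimal association payload could look like
# the following (the values are made up for this example):
#
#   {
#       "name": "OS::Nova::Server",
#       "prefix": "hw:",
#       "properties_target": "image"
#   }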
# Time-stamp: <2015-03-01 06:13:00 Ryo KOBAYASHI> """ Routines related to neural network. """ import math,time,os import numpy as np import multiprocessing as mp #.....import from local modules import fitpot #.....constants which may be different from those in fitpot.py _large= 1.0e+30 #.....global variables _l1st= True _nl = 1 _nsp= 1 _nsf= 0 _nhl1= 0 _nhl2= 0 _ergs= [] _frcs= [] _gsf= [] _dgsf=[] _hl1= [] _hl2= [] _aml= [] _bml= [] _cml= [] _fmatch= False _basedir='learning_set' _samples=[] _sample_dirs=[] _nprcs= 1 _ergrefs= [] _frcrefs= [] _fmethod= 'test' _parfile= 'in.params.NN' _runmode= 'serial' _rcut = 5.0 _params= [] _pranges= [] #============================================================ routines def init(*args,**kwargs): """ Initialize variables for the calculation of NN parameters. This should be called at the first place before any other NN-related routines. """ global _nl,_nsf,_nhl1,_nhl2,_gsf,_dgsf global _fmatch,_basedir,_samples,_sample_dirs,_nprcs global _ergrefs,_frcrefs,_fmethod,_parfile,_runmode,_rcut global _params,_pranges,_vranges _basedir = args[0] _params = args[1] _sample_dirs= args[2] _samples = args[3] _nprcs = args[4] _fmatch = args[5] _ergrefs = args[6] _frcrefs = args[7] _fmethod = args[8] _parfile = args[9] _runmode = args[10] _rcut = args[11] _pranges = args[12] _vranges = args[13] #.....read NN parameters f= open(_basedir+'/'+'in.const.NN') data= f.readline().split() _nl = int(data[0]) _nsp = int(data[1]) _nsfc= int(data[2]) _nhl1= int(data[3]) if _nl == 2: _nhl2= int(data[4]) n2= 0 n3= 0 for line in f.readlines(): if int(line.split()[0]) == 1: n2 += 1 elif int(line.split()[0]) == 2: n3 += 1 f.close() print ' Number of hidden layers =',_nl print ' Number of nodes in hidden-layer-1 =',_nhl1 if _nl == 2: print ' Number of nodes in hidden-layer-2 =',_nhl2 print ' Number of species =',_nsp print ' Number of 2-body symmetry functions =',n2 print ' Number of 3-body symmetry functions =',n3 # check ncmb2= _nsp +factorial(_nsp,2)/2 ncmb3= _nsp *ncmb2 _nsf= n2*ncmb2 +n3*ncmb3 print ' Number of symmetry functions =',_nsf if _nl == 1: nw= _nsf*_nhl1 +_nhl1 elif _nl == 2: nw= _nsf*_nhl1 +_nhl1*_nhl2 +_nhl2 if len(_params) != nw: print ' [Error] len(params) != nw' print ' len(params) = ',len(_params) print ' nsf = ',_nsf print ' nhl1 = ',_nhl1 if _nl == 2: print ' nhl2 = ',_nhl2 print ' nw = ',nw exit() #.....read bases #_gsf,_hl1,_aml,_bml= gather_basis(*args) _gsf,_dgsf= gather_bases_new(*args) def factorial(n,m): """ Returns factorial of n by m-times. 
""" if m <= 0: return 1 return n*factorial(n-1,m-1) def sigmoid(x): if x < -10.0: return 0.0 elif x > 10.0: return 1.0 return 1.0/(1.0 +math.exp(-x)) def vars2wgts(x): if _nl == 1: wgt1= np.zeros((_nhl1+1,_nsf+1)) wgt2= np.zeros(_nhl1+1) ix= 0 for isf in range(1,_nsf+1): for ihl1 in range(1,_nhl1+1): wgt1[ihl1,isf]= x[ix] ix += 1 for ihl1 in range(1,_nhl1+1): wgt2[ihl1]= x[ix] ix += 1 return (wgt1,wgt2) elif _nl == 2: wgt1= np.zeros((_nhl1+1,_nsf+1)) wgt2= np.zeros((_nhl2+1,_nhl1+1)) wgt3= np.zeros(_nhl2+1) ix= 0 for isf in range(1,_nsf+1): for ihl1 in range(1,_nhl1+1): wgt1[ihl1,isf]= x[ix] ix += 1 for ihl1 in range(1,_nhl1+1): for ihl2 in range(1,_nhl2+1): wgt2[ihl2,ihl1]= x[ix] ix += 1 for ihl2 in range(1,_nhl2+1): wgt3[ihl2]= x[ix] ix += 1 return (wgt1,wgt2,wgt3) def calc_ef_from_pmd(x,*args): dir= args[0] cwd= os.getcwd() # print ' cwd=',cwd # print ' dir=',dir #.....store original file os.system('cp '+dir+'/'+_parfile+' '+dir+'/'+_parfile+'.tmp') write_params(dir+'/'+_parfile,x) #.....run pmd in all sample directories os.chdir(dir) #print os.getcwd(),dir if _runmode in ('serial','Serial','SERIAL','sequential','single'): os.system('./serial_run_pmd.sh '+_parfile) elif _runmode in ('parallel','Parallel','PARALLEL'): os.system('python ./parallel_run_pmd.py '+_parfile) else: print "{0:*>20}: no such run_mode !!!".format(' Error', _runmode) exit() os.chdir(cwd) #.....restore original file os.system('cp '+dir+'/'+_parfile+' '+dir+'/'+_parfile+'.current') os.system('cp '+dir+'/'+_parfile+'.tmp'+' '+dir+'/'+_parfile) #.....gather pmd results ergs,frcs= gather_pmd_data(dir) return (ergs,frcs) def calc_ef_from_smd(x,*args): dir= args[0] cwd= os.getcwd() # print ' cwd=',cwd # print ' dir=',dir #.....store original file os.system('cp '+dir+'/'+_parfile+' '+dir+'/'+_parfile+'.tmp') write_params(dir+'/'+_parfile,x) #.....run smd in all sample directories os.chdir(dir) #print os.getcwd(),dir if _runmode in ('serial','Serial','SERIAL','sequential','single'): os.system('./serial_run_smd.sh '+_parfile) elif _runmode in ('parallel','Parallel','PARALLEL'): os.system('python ./parallel_run_smd.py '+_parfile) else: print "{0:*>20}: no such run_mode !!!".format(' Error', _runmode) exit() os.chdir(cwd) #.....restore original file os.system('cp '+dir+'/'+_parfile+' '+dir+'/'+_parfile+'.current') os.system('cp '+dir+'/'+_parfile+'.tmp'+' '+dir+'/'+_parfile) #.....gather smd results ergs,frcs= gather_smd_data(dir) return (ergs,frcs) def write_params(fname,x): params,pranges= fitpot.vars_to_params(x,_vranges,_params,_pranges) f=open(fname,'w') f.write(' {0:6d} {1:10.4f}\n'.format(len(params),_rcut)) for i in range(len(params)): if abs(pranges[i,0]) >= _large and abs(pranges[i,1]) >= _large: f.write('{0:22.14e} \n'.format(params[i])) elif pranges[i,0] == pranges[i,1]: f.write('{0:22.14e} {1:22.14e}\n'.format(params[i],pranges[i,0])) else: f.write('{0:22.14e} {1:22.14e} {2:22.14e}\n'.format(params[i], pranges[i,0], pranges[i,1])) f.close() def gather_pmd_data(basedir): #.....initialize variables ergs=np.zeros(len(_samples)) frcs= [] for smpl in _samples: frcs.append(np.zeros((smpl.natm,3))) #.....read data for i in range(len(_sample_dirs)): dir= _sample_dirs[i] smpl= _samples[i] #.....force ff=open(basedir+'/'+dir+'/frc.pmd','r') natm= int(ff.readline().split()[0]) #.....energy f=open(basedir+'/'+dir+'/erg.pmd','r') ergs[i]= float(f.readline().split()[0]) f.close() for j in range(natm): data= ff.readline().split() for k in range(3): frcs[i][j,k]= float(data[k]) ff.close() return (ergs,frcs) def 
gather_smd_data(basedir): #.....initialize variables ergs=np.zeros(len(_samples)) frcs= [] for smpl in _samples: frcs.append(np.zeros((smpl.natm,3))) #.....read data for i in range(len(_sample_dirs)): dir= _sample_dirs[i] smpl= _samples[i] #.....force ff=open(basedir+'/'+dir+'/frc.smd','r') natm= int(ff.readline().split()[0]) #.....energy f=open(basedir+'/'+dir+'/erg.smd','r') ergs[i]= float(f.readline().split()[0]) f.close() for j in range(natm): data= ff.readline().split() for k in range(3): frcs[i][j,k]= float(data[k]) ff.close() return (ergs,frcs) def calc_ef_from_bases(x,*args): """ Calculate energies and forces of every samples using bases data. """ global _hl1,_ergs,_frcs,_wgt1,_wgt2,_wgt3,_aml,_bml #.....initialize variables if _nl == 1: _wgt1,_wgt2= vars2wgts(x) elif _nl == 2: _wgt1,_wgt2,_wgt3= vars2wgts(x) es=np.zeros(len(_samples)) fs= [] for smpl in _samples: fs.append(np.zeros((smpl.natm,3))) p= mp.Pool(_nprcs) _hl1= [] _aml= [] _bml= [] if _nprcs == 1: for ismpl in range(len(_samples)): smpl= _samples[ismpl] if _nl == 1: est,fst,hl1s,ams,bms= calc_ef1(ismpl,x,*args) _hl1.append(hl1s) _aml.append(ams) _bml.append(bms) elif _nl == 2: est,fst,hl1s,hl2s,ams,bms,cms= calc_ef2(ismpl,x,*args) _hl1.append(hl1s) _hl2.append(hl2s) _aml.append(ams) _bml.append(bms) _cml.append(cms) es[ismpl]= est for ia in range(smpl.natm): fs[ismpl][ia,0] += fst[ia,0] fs[ismpl][ia,1] += fst[ia,1] fs[ismpl][ia,2] += fst[ia,2] else: func_args=[] if _nl == 1: for ismpl in range(len(_samples)): func_args.append( (calc_ef1,ismpl,x) ) elif _nl == 2: for ismpl in range(len(_samples)): func_args.append( (calc_ef2,ismpl,x) ) results= p.map(arg_wrapper,func_args) p.close() p.join() for ismpl in range(len(_samples)): smpl= _samples[ismpl] if _nl == 1: est,fst,hl1s,ams,bms= results[ismpl] _hl1.append(hl1s) _aml.append(ams) _bml.append(bms) elif _nl == 2: est,fst,hl1s,hl2s,ams,bms,cms= results[ismpl] _hl1.append(hl1s) _hl2.append(hl2s) _aml.append(ams) _bml.append(bms) _cml.append(cms) es[ismpl]= est for ia in range(smpl.natm): fs[ismpl][ia,0] += fst[ia,0] fs[ismpl][ia,1] += fst[ia,1] fs[ismpl][ia,2] += fst[ia,2] # print ' es:' # print es _ergs= es _frcs= fs return (es,fs) def arg_wrapper(args): return args[0](*args[1:]) def calc_ef(ismpl,x,*args): global _wgt1,_wgt2 smpl= _samples[ismpl] gsfs= _gsf[ismpl] dgsfs= _dgsf[ismpl] es= 0.0 fs= np.zeros((smpl.natm,3)) #.....energy iprm= 0 #print ' ismpl=',ismpl hl1s= np.zeros((_nhl1+1,smpl.natm)) for ia in range(smpl.natm): #.....calc hidden-layer value using sigmoid function tmp= 0.0 for ihl1 in range(1,_nhl1+1): for isf in range(1,_nsf+1): hl1s[ihl1,ia]+= _wgt1[ihl1,isf] *gsfs[ia,isf] hl1s[ihl1,ia]= sigmoid(hl1s[ihl1,ia]) for ihl1 in range(1,_nhl1+1): es += _wgt2[ihl1] *(hl1s[ihl1,ia]-0.5) #.....forces if _fmatch: for ihl1 in range(1,_nhl1+1): w2= _wgt2[ihl1] for ja in range(smpl.natm): dh=hl1s[ihl1,ja]*(1.0-hl1s[ihl1,ja]) for isf in range(1,_nsf+1): w1= _wgt1[ihl1,isf] for ia in range(smpl.natm): fs[ia,:] -= w1*w2*dh*dgsfs[isf,ja,ia,:] return (es,fs,hl1s) def calc_ef1(ismpl,x,*args): """ Calculate energy and forces of sample-i in the case of 1 hidden layers. 
""" smpl= _samples[ismpl] gsfs= _gsf[ismpl] dgsfs= _dgsf[ismpl] es= 0.0 fs= np.zeros((smpl.natm,3)) #.....energy iprm= 0 #print ' ismpl=',ismpl hl1s= np.zeros((_nhl1+1,smpl.natm)) for ia in range(smpl.natm): #.....calc hidden-layer value using sigmoid function tmp= 0.0 for ihl1 in range(1,_nhl1+1): for isf in range(1,_nsf+1): hl1s[ihl1,ia]+= _wgt1[ihl1,isf] *gsfs[ia,isf] hl1s[ihl1,ia]= sigmoid(hl1s[ihl1,ia]) for ihl1 in range(1,_nhl1+1): es += _wgt2[ihl1] *(hl1s[ihl1,ia]-0.5) #.....forces ams= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) bms= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) if _fmatch: dg= np.zeros(3) for ihl1 in range(1,_nhl1+1): w2= _wgt2[ihl1] for isf in range(1,_nsf+1): w1= _wgt1[ihl1,isf] for ja in range(smpl.natm): h= hl1s[ihl1,ja] dh= h*(1.0-h) ddhg= dh*(1.0-2.0*h)*gsfs[ja,isf] for ia in range(smpl.natm): fs[ia,:] -= w1*w2*dh*dgsfs[isf,ja,ia,:] ams[isf,ihl1,ia,:] += dh*dgsfs[isf,ja,ia,:] bms[isf,ihl1,ia,:] += ddhg*dgsfs[isf,ja,ia,:] return (es,fs,hl1s,ams,bms) def calc_ef2(ismpl,x,*args): """ Calculate energy and forces of sample-i in the case of 2 hidden layers. """ smpl= _samples[ismpl] gsfs= _gsf[ismpl] dgsfs= _dgsf[ismpl] es= 0.0 fs= np.zeros((smpl.natm,3)) #.....energy iprm= 0 #print ' ismpl=',ismpl hl1s= np.zeros((_nhl1+1,smpl.natm)) hl2s= np.zeros((_nhl2+1,smpl.natm)) for ia in range(smpl.natm): #.....calc hidden-layer value using sigmoid function for ihl1 in range(1,_nhl1+1): tmp= 0.0 for isf in range(1,_nsf+1): tmp += _wgt1[ihl1,isf] *gsfs[ia,isf] hl1s[ihl1,ia]= sigmoid(tmp) for ihl2 in range(1,_nhl2+1): tmp= 0.0 for ihl1 in range(1,_nhl1+1): tmp += _wgt2[ihl2,ihl1] *(hl1s[ihl1,ia]-0.5) hl2s[ihl2,ia]= sigmoid(tmp) for ihl2 in range(1,_nhl2+1): es += _wgt3[ihl2] *(hl2s[ihl2,ia]-0.5) #.....forces ams3= np.zeros((_nhl2+1,smpl.natm,3)) ams2= np.zeros((_nhl1+1,_nhl2+1,smpl.natm,3)) ams1= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) if _fmatch: dg= np.zeros(3) for ihl2 in range(1,_nhl2+1): w3= _wgt3[ihl2] for ihl1 in range(1,_nhl1+1): w2= _wgt2[ihl2,ihl1] for isf in range(1,_nsf+1): w1= _wgt1[ihl1,isf] for ja in range(smpl.natm): gj= gsfs[ja,isf] h1= hl1s[ihl1,ja] dh1= h1*(1.0-h1) ddh1= -dh1*dh1 ddh1g= dh1*(1.0-2.0*h1)*gj h2= hl2s[ihl2,ja] dh2= h2*(1.0-h2) ddh2= -dh2*dh2 t0= w1*w2*w3*dh1*dh2 t3= dh1*dh2*w2*w1 t2= w3*w1*(ddh2*dh1*w2+h2)*dh1 t1= w3*w2*(ddh2*dh1*w1*w2*dh1*gj*dh1*w1 \ +dh2*ddh1*w1*gj*w1 +dh2*dh1) for ia in range(smpl.natm): dg[:]= dgsfs[isf,ja,ia,:] fs[ia,:] -= t0*dg[:] ams3[ihl2,ia,:] += t3*dg[:] ams2[ihl1,ihl2,ia,:] += t2*dg[:] ams1[isf,ihl1,ia,:] += t1*dg[:] return (es,fs,hl1s,hl2s,ams1,ams2,ams3) def grad(x,*args): global _ergs,_frcs,_hl1 t0= time.time() dir= args[0] if len(_hl1) == 0: _ergs,_frcs= calc_ef_from_bases(x,*args) if len(_ergs) == 0: _ergs,_frcs= gather_smd_data(dir) p= mp.Pool(_nprcs) grad= np.zeros(len(x)) if _nprcs == 1: for ismpl in range(len(_samples)): if _nl == 1: gs= grad_core_new1(ismpl,x) elif _nl == 2: gs= grad_core_new2(ismpl,x) for iprm in range(len(x)): grad[iprm] += gs[iprm] else: func_args=[] if _nl == 1: for ismpl in range(len(_samples)): func_args.append( (grad_core_new1,ismpl,x)) elif _nl == 2: for ismpl in range(len(_samples)): func_args.append( (grad_core_new2,ismpl,x)) results= p.map(arg_wrapper,func_args) p.close() p.join() for ismpl in range(len(_samples)): gs= results[ismpl] for iprm in range(len(x)): grad[iprm] += gs[iprm] print ' ===> time NN.grad: {0:12.3f} sec'.format(time.time()-t0) #print ' grad=',grad return grad def grad_core(ismpl,ergs,frcs,*args): x = args[0] gs= np.zeros(len(x)) dgs=np.zeros(len(x)) smpl= _samples[ismpl] 
ediff= (ergs[ismpl] -_ergrefs[ismpl]) /smpl.natm gsfs= _gsf[ismpl] hl1s= _hl1[ismpl] iprm= 0 #print ' ismpl=',ismpl for isf in range(1,_nsf+1): for ihl1 in range(1,_nhl1+1): tmp= 0.0 for ia in range(smpl.natm): tmp += _wgt2[ihl1] *hl1s[ia,ihl1] \ *(1.0 -hl1s[ia,ihl1]) *gsfs[ia,isf] gs[iprm] += 2.0*ediff*tmp iprm += 1 for ihl1 in range(1,_nhl1+1): tmp= 0.0 for ia in range(smpl.natm): tmp += (hl1s[ia,ihl1] -0.5) gs[iprm] += 2.0*ediff*tmp iprm += 1 # for ia in range(smpl.natm): # for ihl1 in range(_nhl1+1): # print ' ia,ihl1,hl1s=',ia,ihl1,hl1s[ia,ihl1] if _fmatch: amls= _aml[ismpl] bmls= _bml[ismpl] iprm= 0 #print ' ismpl=',ismpl for isf in range(1,_nsf+1): for ihl1 in range(1,_nhl1+1): tmp= 0.0 w2= _wgt2[ihl1] for ia in range(smpl.natm): fdiff= frcs[ismpl][ia] -_frcrefs[ismpl][ia] am= amls[ia,ihl1,isf] bm= bmls[ia,ihl1,isf] #print ' isf,ihl1,ia,am,bm=',isf,ihl1,ia,am[:],bm[:] tmp -= 2.0*w2*( fdiff[0]*(am[0]+bm[0]) \ +fdiff[1]*(am[1]+bm[1]) \ +fdiff[2]*(am[2]+bm[2]) ) /smpl.natm/3 dgs[iprm] += tmp iprm += 1 for ihl1 in range(1,_nhl1+1): tmp= 0.0 for ia in range(smpl.natm): fdiff= frcs[ismpl][ia] -_frcrefs[ismpl][ia] for isf in range(1,_nsf+1): am= amls[ia,ihl1,isf] w1= _wgt1[ihl1,isf] tmp -= 2.0*w1*( fdiff[0]*am[0] \ +fdiff[1]*am[1] \ +fdiff[2]*am[2] ) /smpl.natm/3 dgs[iprm] += tmp iprm += 1 # print ' gs,dgs,gs+dgs:' # for iprm in range(len(x)): # print ' {0:15.7f} {1:15.7f} {2:15.7f}'.format(gs[iprm],dgs[iprm],gs[iprm]+dgs[iprm]) gs= gs +dgs return gs def grad_core_new(ismpl,*args): x = args[0] gs= np.zeros(len(x)) dgs=np.zeros(len(x)) smpl= _samples[ismpl] ediff= (_ergs[ismpl] -_ergrefs[ismpl]) /smpl.natm gsfs= _gsf[ismpl] dgsfs= _dgsf[ismpl] hl1s= _hl1[ismpl] iprm= _nsf*_nhl1 +_nhl1 for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp += (hl1s[ihl1,ia] -0.5) iprm -= 1 gs[iprm] += 2.0*ediff*tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia in range(smpl.natm): h= hl1s[ihl1,ia] tmp += _wgt2[ihl1] *h*(1.0-h) *gsfs[ia,isf] iprm -= 1 gs[iprm] += 2.0*ediff*tmp if _fmatch: am= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) bm= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) cm= np.zeros(3) iprm= _nsf*_nhl1 +_nhl1 for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ja in range(smpl.natm): h= hl1s[ihl1,ja] dh= h*(1.0-h) ddh= h*(1.0-h)*(1.0-2.0*h) for isf in range(1,_nsf+1): w1= _wgt1[ihl1,isf] ddhg= ddh*gsfs[ja,isf] for ia in range(smpl.natm): fdiff= (_frcs[ismpl][ia] -_frcrefs[ismpl][ia]) \ *2/smpl.natm/3 am[isf,ihl1,ia,:]+= dh*dgsfs[isf,ja,ia,:] bm[isf,ihl1,ia,:]+= ddhg*dgsfs[isf,ja,ia,:] tmp -= w1*( fdiff[0]*dh*dgsfs[isf,ja,ia,0] \ +fdiff[1]*dh*dgsfs[isf,ja,ia,1] \ +fdiff[2]*dh*dgsfs[isf,ja,ia,2] ) iprm -= 1 dgs[iprm] += tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 w2= _wgt2[ihl1] for ia in range(smpl.natm): fdiff= (_frcs[ismpl][ia] -_frcrefs[ismpl][ia]) \ *2 /smpl.natm/3 cm[:]= am[isf,ihl1,ia,:] +bm[isf,ihl1,ia,:] tmp -= w2*( fdiff[0]*(cm[0]) \ +fdiff[1]*(cm[1]) \ +fdiff[2]*(cm[2]) ) iprm -= 1 dgs[iprm] += tmp gs[:]= gs[:] +dgs[:] return gs def grad_core_new1(ismpl,*args): x = args[0] gs= np.zeros(len(x)) dgs=np.zeros(len(x)) smpl= _samples[ismpl] ediff= (_ergs[ismpl] -_ergrefs[ismpl]) /smpl.natm fdiff= (_frcs[ismpl][:,:] -_frcrefs[ismpl][:,:])*2/smpl.natm/3 gsfs= _gsf[ismpl] dgsfs= _dgsf[ismpl] hl1s= _hl1[ismpl] iprm= _nsf*_nhl1 +_nhl1 for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp += (hl1s[ihl1,ia] -0.5) iprm -= 1 gs[iprm] += 2.0*ediff*tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia 
in range(smpl.natm): h= hl1s[ihl1,ia] tmp += _wgt2[ihl1] *h*(1.0-h) *gsfs[ia,isf] iprm -= 1 gs[iprm] += 2.0*ediff*tmp if _fmatch: # am= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) # bm= np.zeros((_nsf+1,_nhl1+1,smpl.natm,3)) ams= _aml[ismpl] bms= _bml[ismpl] cm= np.zeros(3) iprm= _nsf*_nhl1 +_nhl1 for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for isf in range(1,_nsf+1): w1= _wgt1[ihl1,isf] for ia in range(smpl.natm): tmp -= w1*( fdiff[ia,0]*ams[isf,ihl1,ia,0] \ +fdiff[ia,1]*ams[isf,ihl1,ia,1] \ +fdiff[ia,2]*ams[isf,ihl1,ia,2] ) iprm -= 1 dgs[iprm] += tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 w2= _wgt2[ihl1] for ia in range(smpl.natm): cm[:]= ams[isf,ihl1,ia,:] +bms[isf,ihl1,ia,:] tmp -= w2*( fdiff[ia,0]*cm[0] \ +fdiff[ia,1]*cm[1] \ +fdiff[ia,2]*cm[2] ) iprm -= 1 dgs[iprm] += tmp gs[:]= gs[:] +dgs[:] return gs def grad_core_new2(ismpl,*args): x = args[0] gs= np.zeros(len(x)) dgs=np.zeros(len(x)) smpl= _samples[ismpl] ediff= (_ergs[ismpl] -_ergrefs[ismpl]) /smpl.natm gsfs= _gsf[ismpl] hl1s= _hl1[ismpl] hl2s= _hl2[ismpl] iprm= _nsf*_nhl1 +_nhl1*_nhl2 +_nhl2 for ihl2 in range(_nhl2,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp += (hl2s[ihl2,ia] -0.5) iprm -= 1 gs[iprm] += 2.0*ediff*tmp for ihl1 in range(_nhl1,0,-1): for ihl2 in range(_nhl2,0,-1): tmp= 0.0 for ia in range(smpl.natm): h2= hl2s[ihl2,ia] h1= hl1s[ihl1,ia] tmp += _wgt3[ihl2] *h2*(1.0-h2) *h1 iprm -= 1 gs[iprm] += 2.0*ediff*tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia in range(smpl.natm): h1= hl1s[ihl1,ia] dh1gsfi= gsfs[ia,isf] *h1*(1.0-h1) for ihl2 in range(_nhl2): h2= hl2s[ihl2,ia] w2= _wgt2[ihl2,ihl1] w3= _wgt3[ihl2] tmp += w2*w3 *h2*(1.0-h2) *dh1gsfi iprm -= 1 gs[iprm] += 2.0*ediff*tmp if _fmatch: fdiff= (_frcs[ismpl][:,:] -_frcrefs[ismpl][:,:])*2 \ /smpl.natm/3 ams1= _aml[ismpl] ams2= _bml[ismpl] ams3= _cml[ismpl] cm= np.zeros(3) iprm= _nsf*_nhl1 +_nhl1*_nhl2 +_nhl2 for ihl2 in range(_nhl2,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp -= fdiff[ia,0]*ams3[ihl2,ia,0] \ +fdiff[ia,1]*ams3[ihl2,ia,1] \ +fdiff[ia,2]*ams3[ihl2,ia,2] iprm -= 1 dgs[iprm] += tmp for ihl1 in range(_nhl1,0,-1): for ihl2 in range(_nhl2,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp -= fdiff[ia,0]*ams2[ihl1,ihl2,ia,0] \ +fdiff[ia,1]*ams2[ihl1,ihl2,ia,1] \ +fdiff[ia,2]*ams2[ihl1,ihl2,ia,2] iprm -= 1 dgs[iprm] += tmp for isf in range(_nsf,0,-1): for ihl1 in range(_nhl1,0,-1): tmp= 0.0 for ia in range(smpl.natm): tmp -= fdiff[ia,0]*ams1[isf,ihl1,ia,0] \ +fdiff[ia,1]*ams1[isf,ihl1,ia,1] \ +fdiff[ia,2]*ams1[isf,ihl1,ia,2] iprm -= 1 dgs[iprm] += tmp gs[:]= gs[:] +dgs[:] return gs def gather_basis(*args): gsf= [] hl1= [] aml= [] bml= [] #...read basis data for i in range(len(_sample_dirs)): dir= _sample_dirs[i] smpl= _samples[i] f1= open(_basedir+'/'+dir+'/pmd/out.NN.gsf','r') f2= open(_basedir+'/'+dir+'/pmd/out.NN.hl1','r') f3= open(_basedir+'/'+dir+'/pmd/out.NN.aml','r') f4= open(_basedir+'/'+dir+'/pmd/out.NN.bml','r') #.....skip 1st line data1= f1.readline().split() data2= f2.readline().split() data3= f3.readline().split() data4= f4.readline().split() gsfs= np.zeros((smpl.natm,_nsf+1)) hl1s= np.zeros((smpl.natm,_nhl1+1)) amls= np.zeros((smpl.natm,_nhl1+1,_nsf+1,3)) bmls= np.zeros((smpl.natm,_nhl1+1,_nsf+1,3)) for ia in range(smpl.natm): for isf in range(1,_nsf+1): data1= f1.readline().split() gsfs[ia,isf]= float(data1[2]) for ia in range(smpl.natm): for ihl1 in range(1,_nhl1+1): data2= f2.readline().split() hl1s[ia,ihl1]= float(data2[2]) for ia in range(smpl.natm): for ihl1 in range(1,_nhl1+1): for isf 
in range(1,_nsf+1): data3= f3.readline().split() amls[ia,ihl1,isf,0]= float(data3[3]) amls[ia,ihl1,isf,1]= float(data3[4]) amls[ia,ihl1,isf,2]= float(data3[5]) data4= f4.readline().split() bmls[ia,ihl1,isf,0]= float(data4[3]) bmls[ia,ihl1,isf,1]= float(data4[4]) bmls[ia,ihl1,isf,2]= float(data4[5]) gsf.append(gsfs) hl1.append(hl1s) aml.append(amls) bml.append(bmls) f1.close() f2.close() f3.close() f4.close() # print ' hl1:' # for ismpl in range(len(_samples)): # smpl= _samples[ismpl] # for ia in range(smpl.natm): # for ihl1 in range(_nhl1+1): # print ' ismpl,ia,ihl1,hl1=',ismpl,ia,ihl1,hl1[ismpl][ia,ihl1] return gsf,hl1,aml,bml def gather_bases_new(*args): gsf= [] dgsf=[] #...read basis data for i in range(len(_sample_dirs)): dir= _sample_dirs[i] smpl= _samples[i] f1= open(_basedir+'/'+dir+'/smd/out.NN.gsf','r') f2= open(_basedir+'/'+dir+'/smd/out.NN.dgsf','r') #.....skip 1st line data1= f1.readline().split() #data2= f2.readline().split() gsfs= np.zeros((smpl.natm,_nsf+1)) dgsfs=np.zeros((_nsf+1,smpl.natm,smpl.natm,3)) for ia in range(smpl.natm): for isf in range(1,_nsf+1): data1= f1.readline().split() gsfs[ia,isf]= float(data1[2]) for ia in range(smpl.natm): for isf in range(1,_nsf+1): for ja in range(smpl.natm): data2= f2.readline().split() dgsfs[isf,ia,ja,0]= float(data2[3]) dgsfs[isf,ia,ja,1]= float(data2[4]) dgsfs[isf,ia,ja,2]= float(data2[5]) gsf.append(gsfs) dgsf.append(dgsfs) f1.close() f2.close() # print ' hl1:' # for ismpl in range(len(_samples)): # smpl= _samples[ismpl] # for ia in range(smpl.natm): # for ihl1 in range(_nhl1+1): # print ' ismpl,ia,ihl1,hl1=',ismpl,ia,ihl1,hl1[ismpl][ia,ihl1] return gsf,dgsf
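# ---------------------------------------------------------------------------
# Hedged illustration (added; not part of the original module). For a single
# hidden layer (_nl == 1), vars2wgts() unpacks the flat parameter vector x
# with the symmetry-function index as the outer loop, so the expected length
# is
#
#   nw = _nsf*_nhl1 + _nhl1
#
# e.g. with _nsf = 4 symmetry functions and _nhl1 = 3 hidden nodes, len(x)
# must be 4*3 + 3 = 15: the first 12 entries fill wgt1[ihl1,isf] and the
# last 3 fill wgt2[ihl1], matching the length check performed in init().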
# Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import itertools from cinder.api import common as cinder_api_common from cinder.api.middleware import auth as cinder_api_middleware_auth from cinder.api.middleware import sizelimit as cinder_api_middleware_sizelimit from cinder.api.views import versions as cinder_api_views_versions from cinder.backup import api as cinder_backup_api from cinder.backup import chunkeddriver as cinder_backup_chunkeddriver from cinder.backup import driver as cinder_backup_driver from cinder.backup.drivers import ceph as cinder_backup_drivers_ceph from cinder.backup.drivers import glusterfs as cinder_backup_drivers_glusterfs from cinder.backup.drivers import google as cinder_backup_drivers_google from cinder.backup.drivers import nfs as cinder_backup_drivers_nfs from cinder.backup.drivers import posix as cinder_backup_drivers_posix from cinder.backup.drivers import swift as cinder_backup_drivers_swift from cinder.backup.drivers import tsm as cinder_backup_drivers_tsm from cinder.backup import manager as cinder_backup_manager from cinder.cmd import all as cinder_cmd_all from cinder.cmd import volume as cinder_cmd_volume from cinder.common import config as cinder_common_config import cinder.compute from cinder.compute import nova as cinder_compute_nova from cinder import context as cinder_context from cinder import coordination as cinder_coordination from cinder.db import api as cinder_db_api from cinder.db import base as cinder_db_base from cinder import exception as cinder_exception from cinder.image import glance as cinder_image_glance from cinder.image import image_utils as cinder_image_imageutils import cinder.keymgr from cinder.keymgr import conf_key_mgr as cinder_keymgr_confkeymgr from cinder.keymgr import key_mgr as cinder_keymgr_keymgr from cinder.message import api as cinder_message_api from cinder import quota as cinder_quota from cinder.scheduler import driver as cinder_scheduler_driver from cinder.scheduler import host_manager as cinder_scheduler_hostmanager from cinder.scheduler import manager as cinder_scheduler_manager from cinder.scheduler import scheduler_options as \ cinder_scheduler_scheduleroptions from cinder.scheduler.weights import capacity as \ cinder_scheduler_weights_capacity from cinder.scheduler.weights import volume_number as \ cinder_scheduler_weights_volumenumber from cinder import service as cinder_service from cinder import ssh_utils as cinder_sshutils from cinder.transfer import api as cinder_transfer_api from cinder.volume import api as cinder_volume_api from cinder.volume import driver as cinder_volume_driver from cinder.volume.drivers import block_device as \ cinder_volume_drivers_blockdevice from cinder.volume.drivers import blockbridge as \ cinder_volume_drivers_blockbridge from cinder.volume.drivers.cloudbyte import options as \ cinder_volume_drivers_cloudbyte_options from cinder.volume.drivers import coho as cinder_volume_drivers_coho from cinder.volume.drivers.coprhd import common as \ cinder_volume_drivers_coprhd_common from 
cinder.volume.drivers.coprhd import scaleio as \ cinder_volume_drivers_coprhd_scaleio from cinder.volume.drivers import datera as cinder_volume_drivers_datera from cinder.volume.drivers.dell import dell_storagecenter_common as \ cinder_volume_drivers_dell_dellstoragecentercommon from cinder.volume.drivers.disco import disco as \ cinder_volume_drivers_disco_disco from cinder.volume.drivers.dothill import dothill_common as \ cinder_volume_drivers_dothill_dothillcommon from cinder.volume.drivers import drbdmanagedrv as \ cinder_volume_drivers_drbdmanagedrv from cinder.volume.drivers.emc import emc_vmax_common as \ cinder_volume_drivers_emc_emcvmaxcommon from cinder.volume.drivers.emc import scaleio as \ cinder_volume_drivers_emc_scaleio from cinder.volume.drivers.emc.vnx import common as \ cinder_volume_drivers_emc_vnx_common from cinder.volume.drivers.emc import xtremio as \ cinder_volume_drivers_emc_xtremio from cinder.volume.drivers import eqlx as cinder_volume_drivers_eqlx from cinder.volume.drivers.falconstor import fss_common as \ cinder_volume_drivers_falconstor_fsscommon from cinder.volume.drivers.fujitsu import eternus_dx_common as \ cinder_volume_drivers_fujitsu_eternusdxcommon from cinder.volume.drivers.fusionstorage import dsware as \ cinder_volume_drivers_fusionstorage_dsware from cinder.volume.drivers import glusterfs as cinder_volume_drivers_glusterfs from cinder.volume.drivers import hgst as cinder_volume_drivers_hgst from cinder.volume.drivers.hitachi import hbsd_common as \ cinder_volume_drivers_hitachi_hbsdcommon from cinder.volume.drivers.hitachi import hbsd_fc as \ cinder_volume_drivers_hitachi_hbsdfc from cinder.volume.drivers.hitachi import hbsd_horcm as \ cinder_volume_drivers_hitachi_hbsdhorcm from cinder.volume.drivers.hitachi import hbsd_iscsi as \ cinder_volume_drivers_hitachi_hbsdiscsi from cinder.volume.drivers.hitachi import hnas_iscsi as \ cinder_volume_drivers_hitachi_hnasiscsi from cinder.volume.drivers.hitachi import hnas_nfs as \ cinder_volume_drivers_hitachi_hnasnfs from cinder.volume.drivers.hpe import hpe_3par_common as \ cinder_volume_drivers_hpe_hpe3parcommon from cinder.volume.drivers.hpe import hpe_lefthand_iscsi as \ cinder_volume_drivers_hpe_hpelefthandiscsi from cinder.volume.drivers.hpe import hpe_xp_opts as \ cinder_volume_drivers_hpe_hpexpopts from cinder.volume.drivers.huawei import huawei_driver as \ cinder_volume_drivers_huawei_huaweidriver from cinder.volume.drivers.ibm import flashsystem_common as \ cinder_volume_drivers_ibm_flashsystemcommon from cinder.volume.drivers.ibm import flashsystem_fc as \ cinder_volume_drivers_ibm_flashsystemfc from cinder.volume.drivers.ibm import flashsystem_iscsi as \ cinder_volume_drivers_ibm_flashsystemiscsi from cinder.volume.drivers.ibm import gpfs as cinder_volume_drivers_ibm_gpfs from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_common as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_fc as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc from cinder.volume.drivers.ibm.storwize_svc import storwize_svc_iscsi as \ cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi from cinder.volume.drivers.ibm import xiv_ds8k as \ cinder_volume_drivers_ibm_xivds8k from cinder.volume.drivers.infortrend.eonstor_ds_cli import common_cli as \ cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli from cinder.volume.drivers.kaminario import kaminario_common as \ cinder_volume_drivers_kaminario_kaminariocommon from 
cinder.volume.drivers.lenovo import lenovo_common as \ cinder_volume_drivers_lenovo_lenovocommon from cinder.volume.drivers import lvm as cinder_volume_drivers_lvm from cinder.volume.drivers.netapp import options as \ cinder_volume_drivers_netapp_options from cinder.volume.drivers.nexenta import options as \ cinder_volume_drivers_nexenta_options from cinder.volume.drivers import nfs as cinder_volume_drivers_nfs from cinder.volume.drivers import nimble as cinder_volume_drivers_nimble from cinder.volume.drivers.prophetstor import options as \ cinder_volume_drivers_prophetstor_options from cinder.volume.drivers import pure as cinder_volume_drivers_pure from cinder.volume.drivers import quobyte as cinder_volume_drivers_quobyte from cinder.volume.drivers import rbd as cinder_volume_drivers_rbd from cinder.volume.drivers import remotefs as cinder_volume_drivers_remotefs from cinder.volume.drivers.san.hp import hpmsa_common as \ cinder_volume_drivers_san_hp_hpmsacommon from cinder.volume.drivers.san import san as cinder_volume_drivers_san_san from cinder.volume.drivers import scality as cinder_volume_drivers_scality from cinder.volume.drivers import sheepdog as cinder_volume_drivers_sheepdog from cinder.volume.drivers import smbfs as cinder_volume_drivers_smbfs from cinder.volume.drivers import solidfire as cinder_volume_drivers_solidfire from cinder.volume.drivers.synology import synology_common as \ cinder_volume_drivers_synology_synologycommon from cinder.volume.drivers import tegile as cinder_volume_drivers_tegile from cinder.volume.drivers import tintri as cinder_volume_drivers_tintri from cinder.volume.drivers.violin import v7000_common as \ cinder_volume_drivers_violin_v7000common from cinder.volume.drivers.vmware import vmdk as \ cinder_volume_drivers_vmware_vmdk from cinder.volume.drivers import vzstorage as cinder_volume_drivers_vzstorage from cinder.volume.drivers.windows import windows as \ cinder_volume_drivers_windows_windows from cinder.volume.drivers import xio as cinder_volume_drivers_xio from cinder.volume.drivers import zadara as cinder_volume_drivers_zadara from cinder.volume.drivers.zfssa import zfssaiscsi as \ cinder_volume_drivers_zfssa_zfssaiscsi from cinder.volume.drivers.zfssa import zfssanfs as \ cinder_volume_drivers_zfssa_zfssanfs from cinder.volume.drivers.zte import zte_ks as cinder_volume_drivers_zte_zteks from cinder.volume import manager as cinder_volume_manager from cinder.wsgi import eventlet_server as cinder_wsgi_eventletserver from cinder.zonemanager.drivers.brocade import brcd_fabric_opts as \ cinder_zonemanager_drivers_brocade_brcdfabricopts from cinder.zonemanager.drivers.brocade import brcd_fc_zone_driver as \ cinder_zonemanager_drivers_brocade_brcdfczonedriver from cinder.zonemanager.drivers.cisco import cisco_fabric_opts as \ cinder_zonemanager_drivers_cisco_ciscofabricopts from cinder.zonemanager.drivers.cisco import cisco_fc_zone_driver as \ cinder_zonemanager_drivers_cisco_ciscofczonedriver from cinder.zonemanager import fc_zone_manager as \ cinder_zonemanager_fczonemanager def list_opts(): return [ ('FC-ZONE-MANAGER', itertools.chain( cinder_zonemanager_drivers_brocade_brcdfczonedriver.brcd_opts, cinder_zonemanager_fczonemanager.zone_manager_opts, cinder_zonemanager_drivers_cisco_ciscofczonedriver.cisco_opts, )), ('KEYMGR', itertools.chain( cinder_keymgr_keymgr.encryption_opts, cinder.keymgr.keymgr_opts, cinder_keymgr_confkeymgr.key_mgr_opts, )), ('DEFAULT', itertools.chain( cinder_backup_driver.service_opts, cinder_api_common.api_common_opts, 
cinder_backup_drivers_ceph.service_opts, cinder_volume_drivers_smbfs.volume_opts, cinder_backup_chunkeddriver.chunkedbackup_service_opts, cinder_volume_drivers_san_san.san_opts, cinder_volume_drivers_hitachi_hnasnfs.NFS_OPTS, cinder_wsgi_eventletserver.socket_opts, cinder_sshutils.ssh_opts, cinder_volume_drivers_netapp_options.netapp_proxy_opts, cinder_volume_drivers_netapp_options.netapp_connection_opts, cinder_volume_drivers_netapp_options.netapp_transport_opts, cinder_volume_drivers_netapp_options.netapp_basicauth_opts, cinder_volume_drivers_netapp_options.netapp_cluster_opts, cinder_volume_drivers_netapp_options.netapp_7mode_opts, cinder_volume_drivers_netapp_options.netapp_provisioning_opts, cinder_volume_drivers_netapp_options.netapp_img_cache_opts, cinder_volume_drivers_netapp_options.netapp_eseries_opts, cinder_volume_drivers_netapp_options.netapp_nfs_extra_opts, cinder_volume_drivers_netapp_options.netapp_san_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvciscsi. storwize_svc_iscsi_opts, cinder_backup_drivers_glusterfs.glusterfsbackup_service_opts, cinder_volume_drivers_coprhd_scaleio.scaleio_opts, cinder_backup_drivers_tsm.tsm_opts, cinder_volume_drivers_fujitsu_eternusdxcommon. FJ_ETERNUS_DX_OPT_opts, cinder_volume_drivers_ibm_gpfs.gpfs_opts, cinder_volume_drivers_zte_zteks.zte_opts, cinder_volume_drivers_violin_v7000common.violin_opts, cinder_volume_drivers_nexenta_options.NEXENTA_CONNECTION_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_ISCSI_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_DATASET_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_NFS_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_RRMGR_OPTS, cinder_volume_drivers_nexenta_options.NEXENTA_EDGE_OPTS, cinder_exception.exc_log_opts, cinder_common_config.global_opts, cinder_scheduler_weights_capacity.capacity_weight_opts, cinder_volume_drivers_sheepdog.sheepdog_opts, [cinder_api_middleware_sizelimit.max_request_body_size_opt], cinder_volume_drivers_solidfire.sf_opts, cinder_volume_drivers_coprhd_common.volume_opts, cinder_backup_drivers_swift.swiftbackup_service_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_add_qosgroup_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_create_volume_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_connection_opts, cinder_volume_drivers_cloudbyte_options. cloudbyte_update_volume_opts, cinder_service.service_opts, cinder.compute.compute_opts, cinder_volume_drivers_drbdmanagedrv.drbd_opts, cinder_volume_drivers_dothill_dothillcommon.common_opts, cinder_volume_drivers_dothill_dothillcommon.iscsi_opts, cinder_volume_drivers_glusterfs.volume_opts, cinder_volume_drivers_pure.PURE_OPTS, cinder_context.context_opts, cinder_scheduler_driver.scheduler_driver_opts, cinder_volume_drivers_scality.volume_opts, cinder_volume_drivers_vmware_vmdk.vmdk_opts, cinder_volume_drivers_lenovo_lenovocommon.common_opts, cinder_volume_drivers_lenovo_lenovocommon.iscsi_opts, cinder_backup_drivers_posix.posixbackup_service_opts, cinder_volume_drivers_emc_scaleio.scaleio_opts, [cinder_db_base.db_driver_opt], cinder_volume_drivers_eqlx.eqlx_opts, cinder_transfer_api.volume_transfer_opts, cinder_db_api.db_opts, cinder_scheduler_weights_volumenumber. volume_number_weight_opts, cinder_volume_drivers_coho.coho_opts, cinder_volume_drivers_xio.XIO_OPTS, cinder_volume_drivers_ibm_storwize_svc_storwizesvcfc. 
storwize_svc_fc_opts, cinder_volume_drivers_falconstor_fsscommon.FSS_OPTS, cinder_volume_drivers_zfssa_zfssaiscsi.ZFSSA_OPTS, cinder_volume_driver.volume_opts, cinder_volume_driver.iser_opts, cinder_api_views_versions.versions_opts, cinder_volume_drivers_nimble.nimble_opts, cinder_volume_drivers_windows_windows.windows_opts, cinder_volume_drivers_emc_vnx_common.EMC_VNX_OPTS, cinder_volume_drivers_san_hp_hpmsacommon.common_opts, cinder_volume_drivers_san_hp_hpmsacommon.iscsi_opts, cinder_image_glance.glance_opts, cinder_image_glance.glance_core_properties_opts, cinder_volume_drivers_hpe_hpelefthandiscsi.hpelefthand_opts, cinder_volume_drivers_lvm.volume_opts, cinder_volume_drivers_emc_emcvmaxcommon.emc_opts, cinder_volume_drivers_remotefs.nas_opts, cinder_volume_drivers_remotefs.volume_opts, cinder_volume_drivers_emc_xtremio.XTREMIO_OPTS, cinder_backup_drivers_google.gcsbackup_service_opts, [cinder_api_middleware_auth.use_forwarded_for_opt], cinder_volume_drivers_hitachi_hbsdcommon.volume_opts, cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli. infortrend_esds_opts, cinder_volume_drivers_infortrend_eonstor_ds_cli_commoncli. infortrend_esds_extra_opts, cinder_volume_drivers_hitachi_hnasiscsi.iSCSI_OPTS, cinder_volume_drivers_rbd.RBD_OPTS, cinder_volume_drivers_tintri.tintri_opts, cinder_backup_api.backup_api_opts, cinder_volume_drivers_hitachi_hbsdhorcm.volume_opts, cinder_backup_manager.backup_manager_opts, cinder_volume_drivers_ibm_storwize_svc_storwizesvccommon. storwize_svc_opts, cinder_volume_drivers_hitachi_hbsdfc.volume_opts, cinder_quota.quota_opts, cinder_volume_drivers_huawei_huaweidriver.huawei_opts, cinder_volume_drivers_synology_synologycommon.cinder_opts, cinder_volume_drivers_dell_dellstoragecentercommon. common_opts, cinder_scheduler_hostmanager.host_manager_opts, [cinder_scheduler_manager.scheduler_driver_opt], cinder_backup_drivers_nfs.nfsbackup_service_opts, cinder_volume_drivers_blockbridge.blockbridge_opts, [cinder_scheduler_scheduleroptions. scheduler_json_config_location_opt], cinder_volume_drivers_zfssa_zfssanfs.ZFSSA_OPTS, cinder_volume_drivers_fusionstorage_dsware.volume_opts, cinder_volume_drivers_kaminario_kaminariocommon. kaminario1_opts, cinder_volume_drivers_disco_disco.disco_opts, cinder_volume_drivers_hgst.hgst_opts, cinder_message_api.messages_opts, cinder_image_imageutils.image_helper_opts, cinder_compute_nova.nova_opts, cinder_volume_drivers_ibm_flashsystemfc.flashsystem_fc_opts, cinder_volume_drivers_prophetstor_options.DPL_OPTS, cinder_volume_drivers_hpe_hpexpopts.FC_VOLUME_OPTS, cinder_volume_drivers_hpe_hpexpopts.COMMON_VOLUME_OPTS, cinder_volume_drivers_hpe_hpexpopts.HORCM_VOLUME_OPTS, cinder_volume_drivers_hitachi_hbsdiscsi.volume_opts, cinder_volume_manager.volume_manager_opts, cinder_volume_drivers_ibm_flashsystemiscsi. 
flashsystem_iscsi_opts, cinder_volume_drivers_tegile.tegile_opts, cinder_volume_drivers_ibm_flashsystemcommon.flashsystem_opts, [cinder_volume_api.allow_force_upload_opt], [cinder_volume_api.volume_host_opt], [cinder_volume_api.volume_same_az_opt], [cinder_volume_api.az_cache_time_opt], cinder_volume_drivers_ibm_xivds8k.xiv_ds8k_opts, cinder_volume_drivers_hpe_hpe3parcommon.hpe3par_opts, cinder_volume_drivers_datera.d_opts, cinder_volume_drivers_zadara.zadara_opts, cinder_volume_drivers_blockdevice.volume_opts, cinder_volume_drivers_quobyte.volume_opts, cinder_volume_drivers_vzstorage.vzstorage_opts, cinder_volume_drivers_nfs.nfs_opts, )), ('CISCO_FABRIC_EXAMPLE', itertools.chain( cinder_zonemanager_drivers_cisco_ciscofabricopts. cisco_zone_opts, )), ('BRCD_FABRIC_EXAMPLE', itertools.chain( cinder_zonemanager_drivers_brocade_brcdfabricopts. brcd_zone_opts, )), ('COORDINATION', itertools.chain( cinder_coordination.coordination_opts, )), ('BACKEND', itertools.chain( [cinder_cmd_volume.host_opt], [cinder_cmd_all.volume_cmd.host_opt], )), ]
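# The tuples above pair a config-group name with an itertools.chain of option
# lists. oslo-config-generator consumes such a registry through a list_opts()
# entry point; a minimal, hedged sketch of that hook (the names below are
# illustrative, not the actual cinder module):
import itertools
from oslo_config import cfg

_example_opts = [
    ('DEFAULT', itertools.chain([cfg.StrOpt('example_opt', help='illustrative option')])),
]

def list_opts():
    """Entry point for oslo-config-generator: return (group, [opts]) pairs."""
    return [(group, list(opts)) for group, opts in _example_opts]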
# encoding=utf8 import datetime from distutils.version import StrictVersion import hashlib import os.path import shutil import socket import sys import time import random import string import seesaw from seesaw.config import NumberConfigValue from seesaw.externalprocess import ExternalProcess from seesaw.item import ItemInterpolation, ItemValue from seesaw.pipeline import Pipeline from seesaw.project import Project from seesaw.task import SimpleTask, LimitConcurrent from seesaw.tracker import GetItemFromTracker, PrepareStatsForTracker, \ UploadWithTracker, SendDoneToTracker from seesaw.util import find_executable # check the seesaw version if StrictVersion(seesaw.__version__) < StrictVersion("0.1.5"): raise Exception("This pipeline needs seesaw version 0.1.5 or higher.") ########################################################################### # Find a useful rsync_size_tester executable. # RSYNC_TEST = find_executable( "rsync_size_tester", ["1"], [ "./rsync_size_tester.py", "../rsync_size_tester.py", "../../rsync_size_tester.py", "/home/warrior/rsync_size_tester.py", "/usr/bin/rsync_size_tester.py" ] ) #Yes this is hackish but run-pipeline won't let you add more command line args #If the file "LARGE-RSYNC" is in the directory, allow larger rsync's #Using Gigabytes not Gibibytes to be safe if os.path.isfile("LARGE-RSYNC"): MAX_RSYNC = "150000000000" else: MAX_RSYNC = "25000000000" ########################################################################### # The version number of this pipeline definition. # # Update this each time you make a non-cosmetic change. # It will be added to the WARC files and reported to the tracker. VERSION = "20150617.06" USER_AGENT = 'ArchiveTeam' TRACKER_ID = 'sourceforgersync' TRACKER_HOST = 'tracker.archiveteam.org' ########################################################################### # This section defines project-specific tasks. # # Simple tasks (tasks that do not need any concurrency) are based on the # SimpleTask class and have a process(item) method that is called for # each item. class CheckIP(SimpleTask): def __init__(self): SimpleTask.__init__(self, "CheckIP") self._counter = 0 def process(self, item): # NEW for 2014! Check if we are behind firewall/proxy if self._counter <= 0: item.log_output('Checking IP address.') ip_set = set() ip_set.add(socket.gethostbyname('twitter.com')) ip_set.add(socket.gethostbyname('facebook.com')) ip_set.add(socket.gethostbyname('youtube.com')) ip_set.add(socket.gethostbyname('microsoft.com')) ip_set.add(socket.gethostbyname('icanhas.cheezburger.com')) ip_set.add(socket.gethostbyname('archiveteam.org')) if len(ip_set) != 6: item.log_output('Got IP addresses: {0}'.format(ip_set)) item.log_output( 'Are you behind a firewall/proxy? That is a big no-no!') raise Exception( 'Are you behind a firewall/proxy? 
That is a big no-no!') # Check only occasionally if self._counter <= 0: self._counter = 10 else: self._counter -= 1 class PrepareDirectories(SimpleTask): def __init__(self, warc_prefix): SimpleTask.__init__(self, "PrepareDirectories") self.warc_prefix = warc_prefix def process(self, item): item_name = item["item_name"] dirname = "/".join((item["data_dir"], item_name)) if os.path.isdir(dirname): shutil.rmtree(dirname) os.makedirs(dirname) item["item_dir"] = dirname item["warc_file_base"] = "%s-%s-%s" % (self.warc_prefix, item_name.replace(':', '_'), time.strftime("%Y%m%d-%H%M%S")) open("%(item_dir)s/%(warc_file_base)s.warc.gz" % item, "w").close() class getRsyncURL(object): def __init__(self,default_target): #SimpleTask.__init__(self, "GetRsyncURL") self.target = default_target def realize(self, item): #item.log_output(item['item_name']) item_type, item_project, item_mountpoint = item['item_name'].split(':') if item_type == "git": self.target = "git.code.sf.net::p/%(project)s/%(mountpoint)s.git" % {"project":item_project, "mountpoint":item_mountpoint} elif item_type == "svn": self.target = "svn.code.sf.net::p/%(project)s/%(mountpoint)s" % {"project":item_project, "mountpoint":item_mountpoint} elif item_type == "hg": self.target = "hg.code.sf.net::p/%(project)s/%(mountpoint)s" % {"project":item_project, "mountpoint":item_mountpoint} elif item_type == "cvs": self.target = "rsync://%(project)s.cvs.sourceforge.net/cvsroot/%(project)s/*" % {"project":item_project, "mountpoint":item_mountpoint} elif item_type == "bzr": self.target = "%(project)s.bzr.sourceforge.net::bzrroot/%(mountpoint)s/*" % {"project":item_project, "mountpoint":item_mountpoint} item.log_output(self.target) return self.target def __str__(self): return self.target class outputName(object): def __init__(self): pass def realize(self, item): #item.log_output(item['item_name']) item_type, item_project, item_mountpoint = item['item_name'].split(':') return "%(project)s-%(SCM)s-%(mountpoint)s" % {"project":item_project, "SCM":item_type, "mountpoint":item_mountpoint} class cleanItem(object): '''Removes the : in an item while formatting based on ItemInterpolation''' def __init__(self, s): self.s = s def realize(self, item): return string.replace(self.s % item,":",".") def __str__(self): return "<'" + string.replace(self.s % item,":",".") + "'>" class MoveFiles(SimpleTask): def __init__(self): SimpleTask.__init__(self, "MoveFiles") def process(self, item): os.rename("%(item_dir)s/%(warc_file_base)s.txt.gz" % item, "%(data_dir)s/%(warc_file_base)s.txt.gz" % item) shutil.rmtree("%(item_dir)s" % item) def get_hash(filename): with open(filename, 'rb') as in_file: return hashlib.sha1(in_file.read()).hexdigest() CWD = os.getcwd() PIPELINE_SHA1 = get_hash(os.path.join(CWD, 'pipeline.py')) def stats_id_function(item): # NEW for 2014! Some accountability hashes and stats. d = { 'pipeline_hash': PIPELINE_SHA1, 'python_version': sys.version, } return d ########################################################################### # Initialize the project. # # This will be shown in the warrior management panel. The logo should not # be too big. The deadline is optional. project = Project( title="sourceforgersync", project_html=""" <img class="project-logo" alt="Project logo" src="" height="50px" title=""/> <h2>sourceforge.net <span class="links"><a href="http://sourceforge.net/">Website</a> &middot; <a href="http://tracker.archiveteam.org/sourceforge/">Leaderboard</a></span></h2> <p>Saving all project from SourceForge. 
rsyncing all of the source code repositories.</p> """ ) pipeline = Pipeline( CheckIP(), GetItemFromTracker("http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader, VERSION), ExternalProcess("Size Test",[RSYNC_TEST,"-t",getRsyncURL("foo"),"-m",MAX_RSYNC]), LimitConcurrent(1,ExternalProcess("rsync", ["rsync", "-av", getRsyncURL("foo"), cleanItem("%(data_dir)s/%(item_name)s")])), ExternalProcess("tar", ["tar", "-czf", cleanItem("%(data_dir)s/%(item_name)s.tar.gz"), "-C", ItemInterpolation("%(data_dir)s/"), "--owner=1999", "--group=2015", "--no-same-permissions", cleanItem("%(item_name)s")]), LimitConcurrent(NumberConfigValue(min=1, max=4, default="1", name="shared:rsync_threads", title="Rsync threads", description="The maximum number of concurrent uploads."), UploadWithTracker( "http://%s/%s" % (TRACKER_HOST, TRACKER_ID), downloader=downloader, version=VERSION, files=[ cleanItem("%(data_dir)s/%(item_name)s.tar.gz") #ItemInterpolation("foo.tar.gz") ], rsync_target_source_path=ItemInterpolation("%(data_dir)s/"), rsync_extra_args=[ "--recursive", "--partial", "--partial-dir", ".rsync-tmp", ] ), ), PrepareStatsForTracker( defaults={"downloader": downloader, "version": VERSION}, file_groups={ "data": [ cleanItem("%(data_dir)s/%(item_name)s.tar.gz") ] }, id_function=stats_id_function, ), SendDoneToTracker( tracker_url="http://%s/%s" % (TRACKER_HOST, TRACKER_ID), stats=ItemValue("stats") ) )
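# getRsyncURL, outputName and cleanItem above follow seesaw's lazy-argument
# convention: objects exposing realize(item) are resolved per item when a
# task runs, so one Pipeline definition can serve many tracker items. A
# minimal sketch of that resolution step (resolve_arg is a hypothetical
# helper, not part of seesaw):
def resolve_arg(value, item):
    # Resolve a pipeline argument against a concrete item.
    if hasattr(value, "realize"):
        return value.realize(item)
    return value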
import numpy as np import tensorflow as tf import h5py from sklearn.preprocessing import OneHotEncoder import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import time import scipy.io # Functions for initializing neural nets parameters def weight_variable(shape, var_name): initial = tf.truncated_normal(shape, stddev=0.1, dtype=tf.float64) return tf.Variable(initial, name=var_name) def bias_variable(shape, var_name): initial = tf.constant(0.1, shape=shape, dtype=tf.float64) return tf.Variable(initial, name=var_name) def conv2d(x, W): return tf.nn.conv2d(x, W, [1, 1, 1, 1], 'VALID') def batch_nm(x, eps=1e-5): # batch normalization to have zero mean and unit variance mu, var = tf.nn.moments(x, [0]) return tf.nn.batch_normalization(x, mu, var, None, None, eps) # Download data from .mat file into numpy array print('==> Experiment 8g - with dropout and batch normalization') filepath = '/scratch/ttanpras/exp8a_d7_1s.mat' print('==> Loading data from {}'.format(filepath)) f = h5py.File(filepath) data_train = np.array(f.get('trainingFeatures')) data_val = np.array(f.get('validationFeatures')) del f print('==> Data sizes:',data_train.shape, data_val.shape) # Transform labels into on-hot encoding form enc = OneHotEncoder(n_values = 71) ''' NN config parameters ''' sub_window_size = 32 num_features = 169*sub_window_size num_frames = 32 hidden_layer_size = 2000 num_bits = 2000 num_classes = 71 print("Number of features:", num_features) print("Number of songs:",num_classes) # Reshape input features X_train = np.reshape(data_train,(-1, num_features)) X_val = np.reshape(data_val,(-1, num_features)) print("Input sizes:", X_train.shape, X_val.shape) y_train = [] y_val = [] # Add Labels for label in range(num_classes): for sampleCount in range(X_train.shape[0]//num_classes): y_train.append([label]) for sampleCount in range(X_val.shape[0]//num_classes): y_val.append([label]) X_train = np.concatenate((X_train, y_train), axis=1) X_val = np.concatenate((X_val, y_val), axis=1) # Shuffle np.random.shuffle(X_train) np.random.shuffle(X_val) # Separate coefficients and labels y_train = X_train[:, -1].reshape(-1, 1) X_train = X_train[:, :-1] y_val = X_val[:, -1].reshape(-1, 1) X_val = X_val[:, :-1] print('==> Data sizes:',X_train.shape, y_train.shape,X_val.shape, y_val.shape) y_train = enc.fit_transform(y_train.copy()).astype(int).toarray() y_val = enc.fit_transform(y_val.copy()).astype(int).toarray() plotx = [] ploty_train = [] ploty_val = [] # Set-up NN layers x = tf.placeholder(tf.float64, [None, num_features]) W1 = weight_variable([num_features, hidden_layer_size], "W1") b1 = bias_variable([hidden_layer_size], "b1") OpW1 = tf.placeholder(tf.float64, [num_features, hidden_layer_size]) Opb1 = tf.placeholder(tf.float64, [hidden_layer_size]) # Hidden layer activation function: ReLU h1 = batch_nm(tf.nn.relu(tf.matmul(x, W1) + b1)) W2 = weight_variable([hidden_layer_size, num_bits], "W2") b2 = bias_variable([num_bits], "b2") OpW2 = tf.placeholder(tf.float64, [hidden_layer_size, num_bits]) Opb2 = tf.placeholder(tf.float64, [num_bits]) # Pre-activation value for bit representation h = tf.matmul(h1, W2) + b2 h2 = batch_nm(tf.nn.relu(tf.matmul(h1, W2) + b2)) # dropout keep_prob = tf.placeholder(tf.float64) h2_drop = tf.nn.dropout(h2, keep_prob) W3 = weight_variable([num_bits, num_classes], "W3") b3 = bias_variable([num_classes], "b3") OpW3 = tf.placeholder(tf.float64, [num_bits, num_classes]) Opb3 = tf.placeholder(tf.float64, [num_classes]) # Softmax layer (Output), dtype = float64 y = tf.matmul(h2_drop, 
W3) + b3 # NN desired value (labels) y_ = tf.placeholder(tf.float64, [None, num_classes]) # Loss function cross_entropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y_, logits=y)) train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy) sess = tf.InteractiveSession() correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1)) accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float64)) sess.run(tf.initialize_all_variables()) # Training numTrainingVec = len(X_train) batchSize = 500 numEpochs = 1000 bestValErr = 10000 bestValEpoch = 0 startTime = time.time() for epoch in range(numEpochs): for i in range(0,numTrainingVec,batchSize): # Batch Data batchEndPoint = min(i+batchSize, numTrainingVec) trainBatchData = X_train[i:batchEndPoint] trainBatchLabel = y_train[i:batchEndPoint] train_step.run(feed_dict={x: trainBatchData, y_: trainBatchLabel, keep_prob: 0.5}) # Print accuracy if epoch % 5 == 0 or epoch == numEpochs-1: plotx.append(epoch) train_error = cross_entropy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel, keep_prob: 1.0}) train_acc = accuracy.eval(feed_dict={x:trainBatchData, y_: trainBatchLabel, keep_prob: 1.0}) val_error = cross_entropy.eval(feed_dict={x:X_val, y_: y_val, keep_prob: 1.0}) val_acc = accuracy.eval(feed_dict={x:X_val, y_: y_val, keep_prob: 1.0}) ploty_train.append(train_error) ploty_val.append(val_error) print("epoch: %d, val error %g, train error %g"%(epoch, val_error, train_error)) if val_error < bestValErr: bestValErr = val_error bestValEpoch = epoch OpW1 = W1 Opb1 = b1 OpW2 = W2 Opb2 = b2 OpW3 = W3 Opb3 = b3 endTime = time.time() print("Elapse Time:", endTime - startTime) print("Best validation error: %g at epoch %d"%(bestValErr, bestValEpoch)) # Restore best model for early stopping W1 = OpW1 b1 = Opb1 W2 = OpW2 b2 = Opb2 W3 = OpW3 b3 = Opb3 saveweight = {} saveweight['W1'] = np.array(W1.eval()) saveweight['b1'] = np.array(b1.eval()) saveweight['W2'] = np.array(W2.eval()) saveweight['b2'] = np.array(b2.eval()) scipy.io.savemat('exp8g_db_weight.mat',saveweight) print('==> Generating error plot...') errfig = plt.figure() trainErrPlot = errfig.add_subplot(111) trainErrPlot.set_xlabel('Number of Epochs') trainErrPlot.set_ylabel('Cross-Entropy Error') trainErrPlot.set_title('Error vs Number of Epochs') trainErrPlot.scatter(plotx, ploty_train) valErrPlot = errfig.add_subplot(111) valErrPlot.scatter(plotx, ploty_val) errfig.savefig('exp8g_db.png') ''' GENERATING REPRESENTATION OF NOISY FILES ''' namelist = ['orig','comp5','comp10','str5','str10','ampSat_(-15)','ampSat_(-10)','ampSat_(-5)', \ 'ampSat_(5)','ampSat_(10)','ampSat_(15)','pitchShift_(-1)','pitchShift_(-0.5)', \ 'pitchShift_(0.5)','pitchShift_(1)','rev_dkw','rev_gal','rev_shan0','rev_shan1', \ 'rev_gen','crowd-15','crowd-10','crowd-5','crowd0','crowd5','crowd10','crowd15', \ 'crowd100','rest-15','rest-10','rest-5','rest0','rest5','rest10','rest15', \ 'rest100','AWGN-15','AWGN-10','AWGN-5','AWGN0','AWGN5','AWGN10','AWGN15', 'AWGN100'] outdir = '/scratch/ttanpras/taylorswift_noisy_processed/' repDict = {} # Loop over each CQT files, not shuffled for count in range(len(namelist)): name = namelist[count] filename = outdir + name + '.mat' cqt = scipy.io.loadmat(filename)['Q'] cqt = np.transpose(np.array(cqt)) # print(cqt.shape) # Group into windows of 32 without overlapping # Discard any leftover frames num_windows = cqt.shape[0] // 32 cqt = cqt[:32*num_windows] X = np.reshape(cqt,(num_windows, num_features)) # Feed window through model (Only 1 layer of weight w/o 
#                                       non-linearity)
    rep = h.eval(feed_dict={x:X})

    # Put the output representation into a dictionary
    repDict['n'+str(count)] = rep

scipy.io.savemat('exp8g_db_repNon.mat',repDict)
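# The loop above stores one representation matrix per entry of namelist,
# keyed 'n0', 'n1', ... in exp8g_db_repNon.mat. A minimal sketch of reading
# them back for inspection (assumes the file was written as above):
import scipy.io

rep_dict = scipy.io.loadmat('exp8g_db_repNon.mat')
rep_orig = rep_dict['n0']           # representation of the clean 'orig' file
print(rep_orig.shape)               # (num_windows, num_bits) per the model above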
# -*- coding: utf-8 -*- u"""WARP execution template. :copyright: Copyright (c) 2015-2019 RadiaSoft LLC. All Rights Reserved. :license: http://www.apache.org/licenses/LICENSE-2.0.html """ from __future__ import absolute_import, division, print_function from opmd_viewer import OpenPMDTimeSeries from opmd_viewer.openpmd_timeseries import main from opmd_viewer.openpmd_timeseries.data_reader import field_reader from pykern import pkcollections from pykern import pkio from pykern.pkcollections import PKDict from pykern.pkdebug import pkdc, pkdp from sirepo import simulation_db from sirepo.template import template_common import h5py import numpy import os import os.path import py.path import re import sirepo.sim_data _SIM_DATA, SIM_TYPE, _SCHEMA = sirepo.sim_data.template_globals() WANT_BROWSER_FRAME_CACHE = True def background_percent_complete(report, run_dir, is_running): files = _h5_file_list(run_dir) if len(files) < 2: return PKDict( percentComplete=0, frameCount=0, ) file_index = len(files) - 1 last_update_time = int(os.path.getmtime(str(files[file_index]))) # look at 2nd to last file if running, last one may be incomplete if is_running: file_index -= 1 data = simulation_db.read_json(run_dir.join(template_common.INPUT_BASE_NAME)) Fr, info = field_reader.read_field_circ(str(files[file_index]), 'E/r') plasma_length = float(data.models.electronPlasma.length) / 1e3 zmin = float(data.models.simulationGrid.zMin) / 1e6 percent_complete = (info.imshow_extent[1] / (plasma_length - zmin)) if percent_complete < 0: percent_complete = 0 elif percent_complete > 1.0: percent_complete = 1.0 return PKDict( lastUpdateTime=last_update_time, percentComplete=percent_complete * 100, frameCount=file_index + 1, ) def extract_field_report(field, coordinate, mode, data_file): opmd = _opmd_time_series(data_file) F, info = opmd.get_field( plot=False, vmin=None, m=mode, coord=coordinate, iteration=numpy.array([data_file.iteration]), slicing=0.0, field=field, theta=0.0, vmax=None, output=True, slicing_dir='y', ) extent = info.imshow_extent if field == 'rho': field_label = field else: field_label = '{} {}'.format(field, coordinate) return PKDict( x_range=[extent[0], extent[1], len(F[0])], y_range=[extent[2], extent[3], len(F)], x_label='{} [m]'.format(info.axes[1]), y_label='{} [m]'.format(info.axes[0]), title="{} in the mode {} at {}".format( field_label, mode, _iteration_title(opmd, data_file)), z_matrix=numpy.flipud(F).tolist(), ) def extract_particle_report(frame_args, particle_type): data_file = open_data_file(frame_args.run_dir, frame_args.frameIndex) xarg = frame_args.x yarg = frame_args.y nbins = frame_args.histogramBins opmd = _opmd_time_series(data_file) data_list = opmd.get_particle( var_list=[xarg, yarg], species=particle_type, iteration=numpy.array([data_file.iteration]), select=None, output=True, plot=False, ) with h5py.File(data_file.filename) as f: data_list.append(main.read_species_data(f, particle_type, 'w', ())) select = _particle_selection_args(frame_args) if select: with h5py.File(data_file.filename) as f: main.apply_selection(f, data_list, select, particle_type, ()) xunits = ' [m]' if len(xarg) == 1 else '' yunits = ' [m]' if len(yarg) == 1 else '' if len(xarg) == 1: data_list[0] /= 1e6 if len(yarg) == 1: data_list[1] /= 1e6 if xarg == 'z': data_list = _adjust_z_width(data_list, data_file) hist, edges = numpy.histogramdd( [data_list[0], data_list[1]], template_common.histogram_bins(nbins), weights=data_list[2], range=[_select_range(data_list[0], xarg, select), _select_range(data_list[1], yarg, 
select)], ) return PKDict( x_range=[float(edges[0][0]), float(edges[0][-1]), len(hist)], y_range=[float(edges[1][0]), float(edges[1][-1]), len(hist[0])], x_label='{}{}'.format(xarg, xunits), y_label='{}{}'.format(yarg, yunits), title='t = {}'.format(_iteration_title(opmd, data_file)), z_matrix=hist.T.tolist(), frameCount=data_file.num_frames, ) def generate_parameters_file(data, is_parallel=False): template_common.validate_models(data, _SCHEMA) res, v = template_common.generate_parameters_file(data) v['isAnimationView'] = is_parallel v['incSteps'] = 50 v['diagnosticPeriod'] = 50 if data['models']['simulation']['sourceType'] == 'electronBeam': v['useBeam'] = 1 v['useLaser'] = 0 else: v['useBeam'] = 0 v['useLaser'] = 1 if data['models']['electronBeam']['beamRadiusMethod'] == 'a': v['electronBeam_transverseEmittance'] = 0 return res + template_common.render_jinja(SIM_TYPE, v) def get_data_file(run_dir, model, frame, **kwargs): files = _h5_file_list(run_dir) #TODO(pjm): last client file may have been deleted on a canceled animation, # give the last available file instead. if len(files) < frame + 1: frame = -1 filename = str(files[int(frame)]) with open(filename) as f: return os.path.basename(filename), f.read(), 'application/octet-stream' def new_simulation(data, new_simulation_data): source = new_simulation_data['sourceType'] if not source: source = 'laserPulse' data['models']['simulation']['sourceType'] = source if source == 'electronBeam': grid = data['models']['simulationGrid'] grid['gridDimensions'] = 'e' grid['rCellResolution'] = 20 grid['rCellsPerSpotSize'] = 8 grid['rCount'] = 100 grid['rLength'] = 264.0501846240597 grid['rMax'] = 264.0501846240597 grid['rMin'] = 0 grid['rParticlesPerCell'] = 2 grid['rScale'] = 5 grid['zCellResolution'] = 30 grid['zCellsPerWavelength'] = 8 grid['zCount'] = 90 grid['zLength'] = 316.86022154887166 grid['zMax'] = 0 grid['zMin'] = -316.86022154887166 grid['zParticlesPerCell'] = 2 grid['zScale'] = 3 data['models']['electronPlasma']['density'] = 1e23 data['models']['electronPlasma']['length'] = 1 data['models']['fieldAnimation']['coordinate'] = 'z' data['models']['fieldAnimation']['mode'] = '0' data['models']['particleAnimation']['histogramBins'] = 90 data['models']['particleAnimation']['yMin'] = -50 data['models']['particleAnimation']['yMax'] = 50 data['models']['beamAnimation']['histogramBins'] = 91 data['models']['beamPreviewReport']['histogramBins'] = 91 def open_data_file(run_dir, file_index=None): """Opens data file_index'th in run_dir Args: run_dir (py.path): has subdir ``hdf5`` file_index (int): which file to open (default: last one) files (list): list of files (default: load list) Returns: PKDict: various parameters """ files = _h5_file_list(run_dir) res = PKDict() res.num_frames = len(files) res.frame_index = res.num_frames - 1 if file_index is None else file_index res.filename = str(files[res.frame_index]) res.iteration = int(re.search(r'data(\d+)', res.filename).group(1)) return res def python_source_for_model(data, model): return generate_parameters_file(data, is_parallel=True) def remove_last_frame(run_dir): files = _h5_file_list(run_dir) if len(files) > 0: pkio.unchecked_remove(files[-1]) def sim_frame_beamAnimation(frame_args): return extract_particle_report(frame_args, 'beam') def sim_frame_fieldAnimation(frame_args): f = open_data_file(frame_args.run_dir, frame_args.frameIndex) m = frame_args.mode if m != 'all': m = int(m) return extract_field_report( frame_args.field, frame_args.coordinate, m, f, ).pkupdate(frameCount=f.num_frames) def 
sim_frame_particleAnimation(frame_args): return extract_particle_report(frame_args, 'electrons') def write_parameters(data, run_dir, is_parallel): """Write the parameters file Args: data (dict): input run_dir (py.path): where to write is_parallel (bool): run in background? """ pkio.write_text( run_dir.join(template_common.PARAMETERS_PYTHON_FILE), generate_parameters_file( data, is_parallel, ), ) def _adjust_z_width(data_list, data_file): # match boundaries with field report Fr, info = field_reader.read_field_circ(data_file.filename, 'E/r') extent = info.imshow_extent return [ numpy.append(data_list[0], [extent[0], extent[1]]), numpy.append(data_list[1], [extent[2], extent[3]]), numpy.append(data_list[2], [0, 0]), ] def _h5_file_list(run_dir): return pkio.walk_tree( run_dir.join('hdf5'), r'\.h5$', ) def _iteration_title(opmd, data_file): fs = opmd.t[0] * 1e15 return '{:.1f} fs (iteration {})'.format(fs, data_file.iteration) def _opmd_time_series(data_file): prev = None try: prev = main.list_h5_files main.list_h5_files = lambda x: ([data_file.filename], [data_file.iteration]) return OpenPMDTimeSeries(py.path.local(data_file.filename).dirname) finally: if prev: main.list_h5_files = prev def _particle_selection_args(args): if not 'uxMin' in args: return None res = PKDict() for f in '', 'u': for f2 in 'x', 'y', 'z': field = '{}{}'.format(f, f2) min = float(args[field + 'Min']) max = float(args[field + 'Max']) if min == 0 and max == 0: continue res[field] = [min, max] return res if len(res.keys()) else None def _select_range(values, arg, select): if select and arg in select: if arg in ('x', 'y', 'z'): return [select[arg][0] / 1e6, select[arg][1] / 1e6] return select[arg] return [min(values), max(values)]
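# open_data_file() above recovers the iteration number from an OpenPMD dump
# file name with re.search(r'data(\d+)', ...). A standalone illustration with
# a hypothetical path:
import re

fn = '/tmp/run/hdf5/data00000450.h5'            # hypothetical file name
iteration = int(re.search(r'data(\d+)', fn).group(1))
assert iteration == 450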
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for SoftmaxOp and LogSoftmaxOp.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import unittest import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import errors_impl from tensorflow.python.framework import test_util from tensorflow.python.ops import array_ops from tensorflow.python.ops import nn_ops from tensorflow.python.platform import test from tensorflow.python.platform import tf_logging as logging class SoftmaxTest(test.TestCase): def _npSoftmax(self, features, dim=-1, log=False): if dim == -1: dim = len(features.shape) - 1 one_only_on_dim = list(features.shape) one_only_on_dim[dim] = 1 is_fp16 = features.dtype == np.float16 if is_fp16: # Do the compute in fp32 and cast the input back to fp32. features = features.astype(np.float32) e = np.exp(features - np.reshape( np.amax( features, axis=dim), one_only_on_dim)) softmax = e / np.reshape(np.sum(e, axis=dim), one_only_on_dim) if log: res = np.log(softmax) else: res = softmax if is_fp16: res = res.astype(np.float16) return res def _testSoftmax(self, np_features, dim=-1, log=False, use_gpu=False): # A previous version of the code checked the op name rather than the op type # to distinguish between log and non-log. Use an arbitrary name to catch # this bug in future. name = "arbitrary" np_softmax = self._npSoftmax(np_features, dim=dim, log=log) with self.cached_session(use_gpu=use_gpu): if log: tf_softmax = nn_ops.log_softmax(np_features, axis=dim, name=name) else: tf_softmax = nn_ops.softmax(np_features, axis=dim, name=name) out = self.evaluate(tf_softmax) self.assertAllCloseAccordingToType(np_softmax, out) self.assertShapeEqual(np_softmax, tf_softmax) if not log: # Bonus check: the softmaxes should add to one in dimension dim. sum_along_dim = np.sum(out, axis=dim) self.assertAllCloseAccordingToType( np.ones(sum_along_dim.shape), sum_along_dim) def _testAll(self, features): self._testSoftmax(features, use_gpu=True) self._testSoftmax(features, log=True, use_gpu=True) self._testOverflow(use_gpu=True) def testNpSoftmax(self): features = [[1., 1., 1., 1.], [1., 2., 3., 4.]] # Batch 0: All exps are 1. 
The expected result is # Softmaxes = [0.25, 0.25, 0.25, 0.25] # LogSoftmaxes = [-1.386294, -1.386294, -1.386294, -1.386294] # # Batch 1: # exps = [1., 2.718, 7.389, 20.085] # sum = 31.192 # Softmaxes = exps / sum = [0.0320586, 0.08714432, 0.23688282, 0.64391426] # LogSoftmaxes = [-3.44019 , -2.44019 , -1.44019 , -0.44019] np_sm = self._npSoftmax(np.array(features)) self.assertAllClose( np.array([[0.25, 0.25, 0.25, 0.25], [0.0320586, 0.08714432, 0.23688282, 0.64391426]]), np_sm, rtol=1.e-5, atol=1.e-5) np_lsm = self._npSoftmax(np.array(features), log=True) self.assertAllClose( np.array([[-1.386294, -1.386294, -1.386294, -1.386294], [-3.4401897, -2.4401897, -1.4401897, -0.4401897]]), np_lsm, rtol=1.e-5, atol=1.e-5) def _testOverflow(self, use_gpu=False): if use_gpu: type = np.float32 # pylint: disable=redefined-builtin else: type = np.float64 # pylint: disable=redefined-builtin max = np.finfo(type).max # pylint: disable=redefined-builtin features = np.array([[1., 1., 1., 1.], [max, 1., 2., 3.]]).astype(type) with self.cached_session(use_gpu=use_gpu): tf_log_softmax = nn_ops.log_softmax(features) out = self.evaluate(tf_log_softmax) self.assertAllClose( np.array([[-1.386294, -1.386294, -1.386294, -1.386294], [0, -max, -max, -max]]), out, rtol=1.e-5, atol=1.e-5) def testFloat(self): self._testAll( np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float32)) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testFloatGPU(self): if test.is_gpu_available(cuda_only=True): rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)] cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)] for row, col in zip(rows, cols): logging.info("Testing softmax float dtype in shape [%d, %d]", row, col) data = np.random.rand(row, col) self._testAll(data.astype(np.float32)) def testHalf(self): self._testAll( np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float16)) @unittest.skipUnless(test.is_built_with_cuda(), "Test only applicable when running on GPUs") def testHalfGPU(self): if test.is_gpu_available(cuda_only=True): rows = [2**x + np.random.randint(0, 16) for x in range(1, 4)] cols = [2**x + np.random.randint(0, 16) for x in range(1, 4)] for row, col in zip(rows, cols): logging.info("Testing softmax half dtype in shape [%d, %d]", row, col) data = np.random.rand(row, col) self._testAll(data.astype(np.float16)) def testDouble(self): self._testSoftmax( np.array([[1., 1., 1., 1.], [1., 2., 3., 4.]]).astype(np.float64)) self._testOverflow() def test1DTensorAsInput(self): self._testSoftmax( np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False) self._testOverflow(use_gpu=False) def test1DTensorAsInputNoReshape(self): self._testSoftmax( np.array([3., 2., 3., 9.]).astype(np.float64), use_gpu=False) self._testOverflow(use_gpu=False) def test3DTensorAsInput(self): self._testSoftmax( np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32), use_gpu=False) self._testOverflow(use_gpu=False) def test3DTensorAsInputNoReshape(self): self._testSoftmax( np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32), use_gpu=False) self._testOverflow(use_gpu=False) def testAlongFirstDimension(self): self._testSoftmax( np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32), dim=0, use_gpu=False) 
self._testOverflow(use_gpu=False) def testAlongSecondDimension(self): self._testSoftmax( np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32), dim=1, use_gpu=False) self._testOverflow(use_gpu=False) def testAlongNegativeDimension(self): self._testSoftmax( np.array([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]).astype(np.float32), dim=-2, use_gpu=False) self._testOverflow(use_gpu=False) def testShapeInference(self): op = nn_ops.softmax([[[1., 1., 1., 1.], [1., 2., 3., 4.]], [[2., 3., 4., 5.], [6., 7., 8., 9.]], [[5., 4., 3., 2.], [1., 2., 3., 4.]]]) self.assertEqual([3, 2, 4], op.get_shape()) @test_util.run_deprecated_v1 def testEmptyInput(self): with self.cached_session(): x = array_ops.placeholder(dtypes.float32, shape=[0, 3]) self.assertEqual(0, array_ops.size(x).eval()) # reshape would raise if logits is empty with self.assertRaises(errors_impl.InvalidArgumentError): nn_ops.softmax(x, axis=0).eval() def testDimTooLarge(self): with self.cached_session(): # Use placeholder to make sure we get runtime error instead of shape # inference error. dim = array_ops.placeholder_with_default(100, shape=[]) with self.assertRaises(errors_impl.InvalidArgumentError): nn_ops.softmax([1., 2., 3., 4.], axis=dim).eval() def testInvalidAxis(self): # Test case for GitHub issue 22793. with self.cached_session(): ones = array_ops.ones(shape=[2, 3]) with self.assertRaises(errors_impl.InvalidArgumentError): nn_ops.softmax(ones, axis=2).eval() @test_util.run_deprecated_v1 def testLargeDims(self): # Make sure that we properly handle large inputs. See # https://github.com/tensorflow/tensorflow/issues/4425 for details for dims in [129, 256]: ones = np.random.rand(dims, dims).astype(np.float32) np_softmax = self._npSoftmax(ones) for use_gpu in [True, False]: with self.cached_session(use_gpu=use_gpu) as sess: x = array_ops.placeholder(dtypes.float32) y = nn_ops.softmax(x) tf_softmax = sess.run(y, feed_dict={x: ones}) self.assertAllClose(tf_softmax, np_softmax) if __name__ == "__main__": test.main()
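# _npSoftmax above subtracts the per-row max before exponentiating, which is
# why the overflow test with np.finfo(type).max still yields finite values.
# A standalone numpy illustration of the identity softmax(x) == softmax(x - max(x)):
import numpy as np

x = np.array([[1., 2., 3., 4.]])
e = np.exp(x - x.max(axis=-1, keepdims=True))
print(e / e.sum(axis=-1, keepdims=True))   # ~[[0.0321, 0.0871, 0.2369, 0.6439]]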
# Standard python modules. import time import sys # citest modules. import citest.gcp_testing as gcp import citest.json_predicate as jp import citest.service_testing as st # Spinnaker modules. import spinnaker_testing as sk import spinnaker_testing.gate as gate import citest.base class GoogleServerGroupTestScenario(sk.SpinnakerTestScenario): MINIMUM_PROJECT_QUOTA = { 'INSTANCE_TEMPLATES': 1, 'HEALTH_CHECKS': 1, 'FORWARDING_RULES': 1, 'IN_USE_ADDRESSES': 3, 'TARGET_POOLS': 1, } MINIMUM_REGION_QUOTA = { 'CPUS': 3, 'IN_USE_ADDRESSES': 3, 'INSTANCE_GROUP_MANAGERS': 2, 'INSTANCES': 3, } @classmethod def new_agent(cls, bindings): '''Implements the base class interface to create a new agent. This method is called by the base classes during setup/initialization. Args: bindings: The bindings dictionary with configuration information that this factory can draw from to initialize. If the factory would like additional custom bindings it could add them to initArgumentParser. Returns: A citest.service_testing.BaseAgent that can interact with Gate. This is the agent that test operations will be posted to. ''' return gate.new_agent(bindings) def __init__(self, bindings, agent=None): super(GoogleServerGroupTestScenario, self).__init__(bindings, agent) # Our application name and path to post events to. self.TEST_APP = bindings['TEST_APP'] self.__path = 'applications/%s/tasks' % self.TEST_APP # The spinnaker stack decorator for our resources. self.TEST_STACK = bindings['TEST_STACK'] self.TEST_REGION = bindings['TEST_GCE_REGION'] self.TEST_ZONE = bindings['TEST_GCE_ZONE'] # Resource names used among tests. self.__cluster_name = '%s-%s' % (self.TEST_APP, self.TEST_STACK) self.__server_group_name = '%s-v000' % self.__cluster_name self.__cloned_server_group_name = '%s-v001' % self.__cluster_name self.__lb_name = '%s-%s-fe' % (self.TEST_APP, self.TEST_STACK) def create_load_balancer(self): job = [{ 'cloudProvider': 'gce', 'loadBalancerName': self.__lb_name, 'ipProtocol': 'TCP', 'portRange': '8080', 'provider': 'gce', 'stack': self.TEST_STACK, 'detail': 'frontend', 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'region': self.TEST_REGION, 'listeners': [{ 'protocol': 'TCP', 'portRange': '8080', 'healthCheck': False }], 'name': self.__lb_name, 'type': 'upsertLoadBalancer', 'availabilityZones': {self.TEST_REGION: []}, 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Load Balancer Created', retryable_for_secs=30) .list_resource('forwardingRules') .contains_path_value('name', self.__lb_name)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - create load balancer', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='create_load_balancer', data=payload, path=self.__path), contract=builder.build()) def create_instances(self): job = [{ 'application': self.TEST_APP, 'stack': self.TEST_STACK, 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'zone': self.TEST_ZONE, 'network': 'default', 'targetSize': 1, 'capacity': { 'min': 1, 'max': 1, 'desired': 1 }, 'availabilityZones': { self.TEST_REGION: [self.TEST_ZONE] }, 'loadBalancers': [self.__lb_name], 'instanceMetadata': { 'load-balancer-names': self.__lb_name }, 'cloudProvider': 'gce', 'image': self.bindings['TEST_GCE_IMAGE_NAME'], 'instanceType': 'f1-micro', 'initialNumReplicas': 1, 'type': 'createServerGroup', 'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'user': 'integration-tests' }] builder = 
gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Instance Created', retryable_for_secs=150) .list_resource('instanceGroups') .contains_path_value('name', self.__server_group_name)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - create initial server group', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='create_instances', data=payload, path=self.__path), contract=builder.build()) def resize_server_group(self): job = [{ 'targetSize': 2, 'capacity': { 'min': 2, 'max': 2, 'desired': 2 }, 'replicaPoolName': self.__server_group_name, 'numReplicas': 2, 'region': self.TEST_REGION, 'zone': self.TEST_ZONE, 'asgName': self.__server_group_name, 'serverGroupName': self.__server_group_name, 'type': 'resizeServerGroup', 'regions': [self.TEST_REGION], 'zones': [self.TEST_ZONE], 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'cloudProvider': 'gce', 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Server Group Resized', retryable_for_secs=90) .inspect_resource('instanceGroups', self.__server_group_name, ['--zone', self.TEST_ZONE]) .contains_path_eq('size', 2)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - resize to 2 instances', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='resize_instances', data=payload, path=self.__path), contract=builder.build()) def clone_server_group(self): job = [{ 'application': self.TEST_APP, 'stack': self.TEST_STACK, 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'loadBalancers': [self.__lb_name], 'targetSize': 1, 'capacity': { 'min': 1, 'max': 1, 'desired': 1 }, 'zone': self.TEST_ZONE, 'network': 'default', 'instanceMetadata': {'load-balancer-names': self.__lb_name}, 'availabilityZones': {self.TEST_REGION: [self.TEST_ZONE]}, 'cloudProvider': 'gce', 'source': { 'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'region': self.TEST_REGION, 'zone': self.TEST_ZONE, 'serverGroupName': self.__server_group_name, 'asgName': self.__server_group_name }, 'instanceType': 'f1-micro', 'image': self.bindings['TEST_GCE_IMAGE_NAME'], 'initialNumReplicas': 1, 'loadBalancers': [self.__lb_name], 'type': 'cloneServerGroup', 'account': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Server Group Cloned', retryable_for_secs=90) .list_resource('instanceGroupManagers') .contains_path_value('baseInstanceName', self.__cloned_server_group_name)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - clone server group', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='clone_server_group', data=payload, path=self.__path), contract=builder.build()) def disable_server_group(self): job = [{ 'cloudProvider': 'gce', 'asgName': self.__server_group_name, 'serverGroupName': self.__server_group_name, 'region': self.TEST_REGION, 'zone': self.TEST_ZONE, 'type': 'disableServerGroup', 'regions': [self.TEST_REGION], 'zones': [self.TEST_ZONE], 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Server Group Disabled', retryable_for_secs=90) .list_resource('instanceGroupManagers') .contains_path_value('baseInstanceName', 
self.__server_group_name) .excludes_match({ 'baseInstanceName': jp.STR_SUBSTR(self.__server_group_name), 'targetPools': jp.LIST_MATCHES([jp.STR_SUBSTR('https')]) })) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - disable server group', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='disable_server_group', data=payload, path=self.__path), contract=builder.build()) def enable_server_group(self): job = [{ 'cloudProvider': 'gce', 'asgName': self.__server_group_name, 'serverGroupName': self.__server_group_name, 'region': self.TEST_REGION, 'zone': self.TEST_ZONE, 'type': 'enableServerGroup', 'regions': [self.TEST_REGION], 'zones': [self.TEST_ZONE], 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Server Group Enabled', retryable_for_secs=90) .list_resource('instanceGroupManagers') .contains_match({ 'baseInstanceName': jp.STR_SUBSTR(self.__server_group_name), 'targetPools': jp.LIST_MATCHES([jp.STR_SUBSTR( 'https')]) })) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - enable server group', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='enable_server_group', data=payload, path=self.__path), contract=builder.build()) def destroy_server_group(self, version): serverGroupName = '%s-%s' % (self.__cluster_name, version) job = [{ 'cloudProvider': 'gce', 'asgName': serverGroupName, 'serverGroupName': serverGroupName, 'region': self.TEST_REGION, 'zone': self.TEST_ZONE, 'type': 'destroyServerGroup', 'regions': [self.TEST_REGION], 'zones': [self.TEST_ZONE], 'credentials': self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], 'user': 'integration-tests' }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Server Group Destroyed', retryable_for_secs=90) .list_resource('instanceGroupManagers') .excludes_path_value('baseInstanceName', serverGroupName)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - destroy server group', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='destroy_server_group', data=payload, path=self.__path), contract=builder.build()) def delete_load_balancer(self): job = [{ "loadBalancerName": self.__lb_name, "networkLoadBalancerName": self.__lb_name, "region": "us-central1", "type": "deleteLoadBalancer", "regions": ["us-central1"], "credentials": self.bindings['SPINNAKER_GOOGLE_ACCOUNT'], "cloudProvider": "gce", "user": "integration-tests" }] builder = gcp.GcpContractBuilder(self.gcp_observer) (builder.new_clause_builder('Load Balancer Created', retryable_for_secs=30) .list_resource('forwardingRules') .excludes_path_value('name', self.__lb_name)) payload = self.agent.make_json_payload_from_kwargs( job=job, description='Server Group Test - delete load balancer', application=self.TEST_APP) return st.OperationContract( self.new_post_operation( title='delete_load_balancer', data=payload, path=self.__path), contract=builder.build()) class GoogleServerGroupTest(st.AgentTestCase): @staticmethod def setUpClass(): runner = citest.base.TestRunner.global_runner() scenario = runner.get_shared_data(GoogleServerGroupTestScenario) managed_region = runner.bindings['TEST_GCE_REGION'] title = 'Check Quota for {0}'.format(scenario.__class__.__name__) verify_results = gcp.verify_quota( title, scenario.gcp_observer, 
project_quota=GoogleServerGroupTestScenario.MINIMUM_PROJECT_QUOTA, regions=[(managed_region, GoogleServerGroupTestScenario.MINIMUM_REGION_QUOTA)]) if not verify_results: raise RuntimeError('Insufficient Quota: {0}'.format(verify_results)) @property def scenario(self): return citest.base.TestRunner.global_runner().get_shared_data( GoogleServerGroupTestScenario) def test_a_create_load_balancer(self): self.run_test_case(self.scenario.create_load_balancer()) def test_b_create_server_group(self): self.run_test_case(self.scenario.create_instances()) def test_c_resize_server_group(self): self.run_test_case(self.scenario.resize_server_group()) def test_d_clone_server_group(self): self.run_test_case(self.scenario.clone_server_group(), # TODO(ewiseblatt): 20160314 # There is a lock contention race condition # in clouddriver that causes intermittent failure. max_retries=5) def test_e_disable_server_group(self): self.run_test_case(self.scenario.disable_server_group()) def test_f_enable_server_group(self): self.run_test_case(self.scenario.enable_server_group()) def test_g_destroy_server_group_v000(self): self.run_test_case(self.scenario.destroy_server_group('v000')) def test_h_destroy_server_group_v001(self): self.run_test_case(self.scenario.destroy_server_group('v001')) def test_z_delete_load_balancer(self): self.run_test_case(self.scenario.delete_load_balancer()) def main(): defaults = { 'TEST_STACK': GoogleServerGroupTestScenario.DEFAULT_TEST_ID, 'TEST_APP': 'gcpsvrgrptst' + GoogleServerGroupTestScenario.DEFAULT_TEST_ID } return citest.base.TestRunner.main( parser_inits=[GoogleServerGroupTestScenario.initArgumentParser], default_binding_overrides=defaults, test_case_list=[GoogleServerGroupTest]) if __name__ == '__main__': sys.exit(main())
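# The test_a_... / test_b_... names above rely on unittest running methods in
# alphabetical order, which is what sequences create -> resize -> clone ->
# ... -> delete. A minimal illustration of that ordering rule:
import unittest

class OrderDemo(unittest.TestCase):
    def test_b_second(self):
        pass

    def test_a_first(self):
        pass

print(unittest.TestLoader().getTestCaseNames(OrderDemo))
# ['test_a_first', 'test_b_second'] -- sorted by name, not source order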
from __future__ import absolute_import import time from celery import group, subtask, Task from celery.utils.log import get_task_logger from scdown.celery import app from scdown.sc import (USER, USER_TRACKS, USER_FOLLOWINGS, USER_FOLLOWERS, USER_WEB_PROFILES, TRACK_COMMENTS, TRACK_FAVORITERS, Sc) from scdown.s3 import S3 from scdown.neo import (Neo, NODE_USER, NODE_TRACK, NODE_COMMENT, NODE_PROFILE, REL_FOLLOWS, REL_UPLOADED, REL_FAVORITED, REL_HAS_PROFILE, REL_WROTE, REL_REFERS_TO) logger = get_task_logger(__name__) class DatabaseTask(Task): abstract = True _neo = None _sc = None _s3 = None @property def neo(self): if self._neo is None: self._neo = Neo() return self._neo @property def sc(self): from scdown.celeryconfig import MONGOLAB_DB if self._sc is None: self._sc = Sc(db_name=MONGOLAB_DB, logger=logger) return self._sc @property def s3(self): if self._s3 is None: self._s3 = S3(logger=logger) return self._s3 def now(): return long(time.time()) def process_user(user_id): retrieve = fetch.s(USER, user_id) | store.s(res_type=NODE_USER) store_user_list = store_list.s(node_type=NODE_USER) get_followings = fetch_from.s(template=USER_FOLLOWINGS) get_followers = fetch_from.s(template=USER_FOLLOWERS) get_profiles = fetch_from.s(template=USER_WEB_PROFILES) get_tracks = fetch_from.s(template=USER_TRACKS) get_comments = fetch_from.s(template=TRACK_COMMENTS) get_favoriters = fetch_from.s(template=TRACK_FAVORITERS) followed = (get_followers | store_user_list | relate.s(rel_type=REL_FOLLOWS, timestamp=True)) follows = (get_followings | store_user_list | relate.s(rel_type=REL_FOLLOWS, reverse=True, timestamp=True)) web = (get_profiles | store_list.s(NODE_PROFILE) | relate.s(rel_type=REL_HAS_PROFILE, timestamp=True)) tracks = (get_tracks | store_list.s(NODE_TRACK) | relate.s(rel_type=REL_UPLOADED)) cmnts = foreach.s(callback=(get_comments | store_list.s(node_type=NODE_COMMENT) | relate_comments.s())) favoriters = foreach.s(callback=(get_favoriters | store_list.s(node_type=NODE_USER) | relate.s(rel_type=REL_FAVORITED, reverse=True, timestamp=True))) download = foreach.s(callback=get_audio.s()) track_g = group(cmnts, favoriters, download) (retrieve | group(followed, follows, web, tracks | track_g)).apply_async() @app.task def foreach(it, callback): # Apply a callback for each item in an iterator tasks = [] for arg in it: st = subtask(callback) tasks.append(st.clone(args=[arg, ])) group(tasks).apply_async() @app.task(base=DatabaseTask) def fetch(template, _id): sc = fetch.sc return sc.get_sc(template, _id) @app.task(base=DatabaseTask) def store(res, res_type): neo = store.neo node = neo.create_or_update_node(res_type, res) return node._id @app.task(base=DatabaseTask) def fetch_from(node_id, template=None): sc = fetch_from.sc neo = fetch_from.neo node = neo.get(node_id) remote_id = node.properties["id"] return (node_id, sc.get_sc(template, remote_id)) @app.task(base=DatabaseTask) def store_list(node_and_reslst, node_type): neo = store_list.neo main_node_id, reslst = node_and_reslst node_ids = [neo.create_or_update_node(node_type, x)._id for x in reslst] return (main_node_id, node_ids) @app.task(base=DatabaseTask) def relate(x_ys, rel_type=None, reverse=False, timestamp=False): neo = relate.neo x_id, y_ids = x_ys x = neo.get(x_id) ys = [neo.get(y) for y in y_ids] props = {"as_of": now()} if timestamp else {} if reverse: rels = [neo.mk_relation(y, rel_type, x, props=props) for y in ys] else: rels = [neo.mk_relation(x, rel_type, y, props=props) for y in ys] neo.create_all(rels) return y_ids 
@app.task(base=DatabaseTask) def get_audio(track_node_id): neo = get_audio.neo track_node = neo.get(track_node_id) if "s3_key" in track_node.properties: return url = None if track_node.properties["downloadable"]: url = track_node.properties["download_url"] elif track_node.properties["streamable"]: url = track_node.properties["stream_url"] if url is not None: chain = (store_in_s3.s(track_node_id, url) | save_s3_link.s()) chain.apply_async() @app.task(base=DatabaseTask) def store_in_s3(track_node_id, url): sc = store_in_s3.sc s3 = store_in_s3.s3 neo = store_in_s3.neo track_node = neo.get(track_node_id) track_id = track_node.properties["id"] fname = "{}.mp3".format(track_id) if s3.check_s3_for(fname): logger.info("Found {} on S3".format(fname)) return (None, None) else: stream = sc.get_sc(url) return (track_node_id, s3.put_stream_in_s3(fname, stream)) @app.task(base=DatabaseTask, ignore_result=True) def save_s3_link(track_s3): track_node_id, s3_key = track_s3 if s3_key is not None: neo = save_s3_link.neo track_node = neo.get(track_node_id) track_id = track_node.properties["id"] props = {"id": track_id, "s3_key": s3_key} neo.create_or_update_node("Track", props) @app.task(base=DatabaseTask, ignore_result=True) def relate_comments(track_comments): neo = relate_comments.neo track_node_id, comment_node_ids = track_comments track_node = neo.get(track_node_id) comment_nodes = [neo.get(c) for c in comment_node_ids] user_comments = [ (neo.create_or_update_node(NODE_USER, neo.inflate(c.properties)["user"]), c) for c in comment_nodes] refs = [neo.mk_relation(c, REL_REFERS_TO, track_node) for c in comment_nodes] writes = [neo.mk_relation(u, REL_WROTE, c) for u, c in user_comments] neo.create_all(refs + writes)
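# process_user() above builds its workflow from Celery canvas primitives:
# task.s(...) creates a signature, | chains signatures, and group() runs
# branches in parallel. A minimal, self-contained sketch of the same idea
# (the add/mul tasks and broker URL are illustrative only):
from celery import Celery, group

demo_app = Celery('demo', broker='memory://')

@demo_app.task
def add(x, y):
    return x + y

@demo_app.task
def mul(x, y):
    return x * y

# "add 1+2, then multiply the result by 10", in parallel with "add 3+4"
workflow = group(add.s(1, 2) | mul.s(10), add.s(3, 4))
# workflow.apply_async() would dispatch it to a worker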
from django.apps.registry import apps as global_apps from django.db import migrations, router from .exceptions import InvalidMigrationPlan from .loader import MigrationLoader from .recorder import MigrationRecorder from .state import ProjectState class MigrationExecutor: """ End-to-end migration execution - load migrations and run them up or down to a specified set of targets. """ def __init__(self, connection, progress_callback=None): self.connection = connection self.loader = MigrationLoader(self.connection) self.recorder = MigrationRecorder(self.connection) self.progress_callback = progress_callback def migration_plan(self, targets, clean_start=False): """ Given a set of targets, return a list of (Migration instance, backwards?). """ plan = [] if clean_start: applied = set() else: applied = set(self.loader.applied_migrations) for target in targets: # If the target is (app_label, None), that means unmigrate everything if target[1] is None: for root in self.loader.graph.root_nodes(): if root[0] == target[0]: for migration in self.loader.graph.backwards_plan(root): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) # If the migration is already applied, do backwards mode, # otherwise do forwards mode. elif target in applied: # Don't migrate backwards all the way to the target node (that # may roll back dependencies in other apps that don't need to # be rolled back); instead roll back through target's immediate # child(ren) in the same app, and no further. next_in_app = sorted( n for n in self.loader.graph.node_map[target].children if n[0] == target[0] ) for node in next_in_app: for migration in self.loader.graph.backwards_plan(node): if migration in applied: plan.append((self.loader.graph.nodes[migration], True)) applied.remove(migration) else: for migration in self.loader.graph.forwards_plan(target): if migration not in applied: plan.append((self.loader.graph.nodes[migration], False)) applied.add(migration) return plan def _create_project_state(self, with_applied_migrations=False): """ Create a project state including all the applications without migrations and applied migrations if with_applied_migrations=True. """ state = ProjectState(real_apps=list(self.loader.unmigrated_apps)) if with_applied_migrations: # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } for migration, _ in full_plan: if migration in applied_migrations: migration.mutate_state(state, preserve=False) return state def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False): """ Migrate the database up to the given targets. Django first needs to create all project states before a migration is (un)applied and in a second step run all the database operations. """ # The django_migrations table must be present to record applied # migrations. self.recorder.ensure_schema() if plan is None: plan = self.migration_plan(targets) # Create the forwards plan Django would follow on an empty database full_plan = self.migration_plan(self.loader.graph.leaf_nodes(), clean_start=True) all_forwards = all(not backwards for mig, backwards in plan) all_backwards = all(backwards for mig, backwards in plan) if not plan: if state is None: # The resulting state should include applied migrations. 
state = self._create_project_state(with_applied_migrations=True) elif all_forwards == all_backwards: # This should only happen if there's a mixed plan raise InvalidMigrationPlan( "Migration plans with both forwards and backwards migrations " "are not supported. Please split your migration process into " "separate plans of only forwards OR backwards migrations.", plan ) elif all_forwards: if state is None: # The resulting state should still include applied migrations. state = self._create_project_state(with_applied_migrations=True) state = self._migrate_all_forwards(state, plan, full_plan, fake=fake, fake_initial=fake_initial) else: # No need to check for `elif all_backwards` here, as that condition # would always evaluate to true. state = self._migrate_all_backwards(plan, full_plan, fake=fake) self.check_replacements() return state def _migrate_all_forwards(self, state, plan, full_plan, fake, fake_initial): """ Take a list of 2-tuples of the form (migration instance, False) and apply them in the order they occur in the full_plan. """ migrations_to_run = {m[0] for m in plan} for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from these sets so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: if self.progress_callback: self.progress_callback("render_start") state.apps # Render all -- performance critical if self.progress_callback: self.progress_callback("render_success") state = self.apply_migration(state, migration, fake=fake, fake_initial=fake_initial) migrations_to_run.remove(migration) return state def _migrate_all_backwards(self, plan, full_plan, fake): """ Take a list of 2-tuples of the form (migration instance, True) and unapply them in reverse order they occur in the full_plan. Since unapplying a migration requires the project state prior to that migration, Django will compute the migration states before each of them in a first run over the plan and then unapply them in a second run over the plan. """ migrations_to_run = {m[0] for m in plan} # Holds all migration states prior to the migrations being unapplied states = {} state = self._create_project_state() applied_migrations = { self.loader.graph.nodes[key] for key in self.loader.applied_migrations if key in self.loader.graph.nodes } if self.progress_callback: self.progress_callback("render_start") for migration, _ in full_plan: if not migrations_to_run: # We remove every migration that we applied from this set so # that we can bail out once the last migration has been applied # and don't always run until the very end of the migration # process. break if migration in migrations_to_run: if 'apps' not in state.__dict__: state.apps # Render all -- performance critical # The state before this migration states[migration] = state # The old state keeps as-is, we continue with the new state state = migration.mutate_state(state, preserve=True) migrations_to_run.remove(migration) elif migration in applied_migrations: # Only mutate the state if the migration is actually applied # to make sure the resulting state doesn't include changes # from unrelated migrations. 
migration.mutate_state(state, preserve=False) if self.progress_callback: self.progress_callback("render_success") for migration, _ in plan: self.unapply_migration(states[migration], migration, fake=fake) applied_migrations.remove(migration) # Generate the post migration state by starting from the state before # the last migration is unapplied and mutating it to include all the # remaining applied migrations. last_unapplied_migration = plan[-1][0] state = states[last_unapplied_migration] for index, (migration, _) in enumerate(full_plan): if migration == last_unapplied_migration: for migration, _ in full_plan[index:]: if migration in applied_migrations: migration.mutate_state(state, preserve=False) break return state def collect_sql(self, plan): """ Take a migration plan and return a list of collected SQL statements that represent the best-efforts version of that plan. """ statements = [] state = None for migration, backwards in plan: with self.connection.schema_editor(collect_sql=True, atomic=migration.atomic) as schema_editor: if state is None: state = self.loader.project_state((migration.app_label, migration.name), at_end=False) if not backwards: state = migration.apply(state, schema_editor, collect_sql=True) else: state = migration.unapply(state, schema_editor, collect_sql=True) statements.extend(schema_editor.collected_sql) return statements def apply_migration(self, state, migration, fake=False, fake_initial=False): """Run a migration forwards.""" if self.progress_callback: self.progress_callback("apply_start", migration, fake) if not fake: if fake_initial: # Test to see if this is an already-applied initial migration applied, state = self.detect_soft_applied(state, migration) if applied: fake = True if not fake: # Alright, do it normally with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.apply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_applied(app_label, name) else: self.recorder.record_applied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("apply_success", migration, fake) return state def unapply_migration(self, state, migration, fake=False): """Run a migration backwards.""" if self.progress_callback: self.progress_callback("unapply_start", migration, fake) if not fake: with self.connection.schema_editor(atomic=migration.atomic) as schema_editor: state = migration.unapply(state, schema_editor) # For replacement migrations, record individual statuses if migration.replaces: for app_label, name in migration.replaces: self.recorder.record_unapplied(app_label, name) else: self.recorder.record_unapplied(migration.app_label, migration.name) # Report progress if self.progress_callback: self.progress_callback("unapply_success", migration, fake) return state def check_replacements(self): """ Mark replacement migrations applied if their replaced set all are. Do this unconditionally on every migrate, rather than just when migrations are applied or unapplied, to correctly handle the case when a new squash migration is pushed to a deployment that already had all its replaced migrations applied. In this case no new migration will be applied, but the applied state of the squashed migration must be maintained. 
""" applied = self.recorder.applied_migrations() for key, migration in self.loader.replacements.items(): all_applied = all(m in applied for m in migration.replaces) if all_applied and key not in applied: self.recorder.record_applied(*key) def detect_soft_applied(self, project_state, migration): """ Test whether a migration has been implicitly applied - that the tables or columns it would create exist. This is intended only for use on initial migrations (as it only looks for CreateModel and AddField). """ def should_skip_detecting_model(migration, model): """ No need to detect tables for proxy models, unmanaged models, or models that can't be migrated on the current database. """ return ( model._meta.proxy or not model._meta.managed or not router.allow_migrate( self.connection.alias, migration.app_label, model_name=model._meta.model_name, ) ) if migration.initial is None: # Bail if the migration isn't the first one in its app if any(app == migration.app_label for app, name in migration.dependencies): return False, project_state elif migration.initial is False: # Bail if it's NOT an initial migration return False, project_state if project_state is None: after_state = self.loader.project_state((migration.app_label, migration.name), at_end=True) else: after_state = migration.mutate_state(project_state) apps = after_state.apps found_create_model_migration = False found_add_field_migration = False with self.connection.cursor() as cursor: existing_table_names = self.connection.introspection.table_names(cursor) # Make sure all create model and add field operations are done for operation in migration.operations: if isinstance(operation, migrations.CreateModel): model = apps.get_model(migration.app_label, operation.name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue if model._meta.db_table not in existing_table_names: return False, project_state found_create_model_migration = True elif isinstance(operation, migrations.AddField): model = apps.get_model(migration.app_label, operation.model_name) if model._meta.swapped: # We have to fetch the model to test with from the # main app cache, as it's not a direct dependency. model = global_apps.get_model(model._meta.swapped) if should_skip_detecting_model(migration, model): continue table = model._meta.db_table field = model._meta.get_field(operation.name) # Handle implicit many-to-many tables created by AddField. if field.many_to_many: if field.remote_field.through._meta.db_table not in existing_table_names: return False, project_state else: found_add_field_migration = True continue column_names = [ column.name for column in self.connection.introspection.get_table_description(self.connection.cursor(), table) ] if field.column not in column_names: return False, project_state found_add_field_migration = True # If we get this far and we found at least one CreateModel or AddField migration, # the migration is considered implicitly applied. return (found_create_model_migration or found_add_field_migration), after_state
""" A module of deep feature selection based on multilayer perceptrons. This module applies a deep structure with not too many hidden layers. Thus, stochastic gradient descent (back-prop) is used in optimization. Copyright (c) 2008-2013, Theano Development Team All rights reserved. Yifeng Li CMMT, UBC, Vancouver Sep 23, 2014 Contact: yifeng.li.cn@gmail.com """ from __future__ import division import pickle import time import math import copy import numpy import theano import theano.tensor as T from logistic_sgd import LogisticRegression import classification as cl def relu(x): return 0.5*(x+abs(x)) class InputLayer(object): def __init__(self, input, n_in, w=None): """ In the input layer x_i is multiplied by w_i. Yifeng Li, in UBC. Aug 26, 2014. """ self.input=input if w is None: w_values = numpy.ones((n_in,), dtype=theano.config.floatX) # w_values = numpy.asarray(rng.uniform( # low=0, high=1, # size=(n_in,)), dtype=theano.config.floatX) w = theano.shared(value=w_values, name='w', borrow=True) self.w=w #u_values = numpy.ones((n_in,), dtype=theano.config.floatX) #u = theano.shared(value=u_values, name='u', borrow=True) #self.u=u # auxiliary variable for non-negativity self.output = self.w * self.input #self.params=[w,u] self.params=[w] def get_predicted(self,data): return self.w * data class HiddenLayer(object): def __init__(self, rng, input, n_in, n_out, W=None, b=None, activation=T.tanh): """ Typical hidden layer of a MLP: units are fully-connected and have sigmoidal activation function. Weight matrix W is of shape (n_in,n_out) and the bias vector b is of shape (n_out,). NOTE : The nonlinearity used here is tanh by default. Hidden unit activation is thus given by: tanh(dot(input,W) + b) :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type input: theano.tensor.dmatrix :param input: a symbolic tensor of shape (n_examples, n_in) :type n_in: int :param n_in: dimensionality of input :type n_out: int :param n_out: number of hidden units :type activation: theano.Op or function :param activation: Non linearity to be applied in the hidden layer """ self.input = input # `W` is initialized with `W_values` which is uniformely sampled # from sqrt(-6./(n_in+n_hidden)) and sqrt(6./(n_in+n_hidden)) # for tanh activation function # the output of uniform if converted using asarray to dtype # theano.config.floatX so that the code is runable on GPU # Note : optimal initialization of weights is dependent on the # activation function used (among other things). # For example, results presented in [Xavier10] suggest that you # should use 4 times larger initial weights for sigmoid # compared to tanh # We have no info for other function, so we use the same as # tanh. self.activation=activation if W is None: W_values = numpy.asarray(rng.uniform( low=-numpy.sqrt(6. / (n_in + n_out)), high=numpy.sqrt(6. 
/ (n_in + n_out)), size=(n_in, n_out)), dtype=theano.config.floatX) if activation == theano.tensor.nnet.sigmoid: W_values *= 4 W = theano.shared(value=W_values, name='W', borrow=True) if b is None: b_values = numpy.zeros((n_out,), dtype=theano.config.floatX) b = theano.shared(value=b_values, name='b', borrow=True) self.W = W self.b = b lin_output = T.dot(input, self.W) + self.b self.output = (lin_output if activation is None else activation(lin_output)) # parameters of the model self.params = [self.W, self.b] def get_predicted(self,data): lin_output = T.dot(data, self.W) + self.b output = (lin_output if self.activation is None else self.activation(lin_output)) return output class DFS(object): """ Deep feature selection class. One-one input layer + MLP. """ def __init__(self, rng, n_in, n_hidden, n_out, x=None, y=None, activation=T.tanh, lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0): """Initialize the parameters for the DFL class. :type rng: numpy.random.RandomState :param rng: a random number generator used to initialize weights :type n_in: int :param n_in: number of input units, the dimension of the space in which the datapoints lie :type n_hidden: int :param n_hidden: number of hidden units :type n_out: int :param n_out: number of output units, the dimension of the space in which the labels lie activation: activation function, from {T.tanh, T.nnet.sigmoid} lambda1: float scalar, control the sparsity of the input weights. The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ). Thus, the larger lambda1 is, the sparser the input weights are. lambda2: float scalar, control the smoothness of the input weights. The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ). Thus, the larger lambda2 is, the smoother the input weights are. alpha1: float scalar, control the sparsity of the weight matrices in MLP. The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ). Thus, the larger alpha1 is, the sparser the MLP weights are. alpha2: float scalar, control the smoothness of the weight matrices in MLP. The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ). Thus, the larger alpha2 is, the smoother the MLP weights are. 
""" if not x: x=T.matrix('x') self.x=x if not y: y=T.ivector('y') self.y=y self.hidden_layers=[] self.params=[] self.n_layers=len(n_hidden) input_layer=InputLayer(input=self.x,n_in=n_in) self.params.extend(input_layer.params) self.input_layer=input_layer for i in range(len(n_hidden)): if i==0: # first hidden layer hd=HiddenLayer(rng=rng, input=self.input_layer.output, n_in=n_in, n_out=n_hidden[i], activation=activation) else: hd=HiddenLayer(rng=rng, input=self.hidden_layers[i-1].output, n_in=n_hidden[i-1], n_out=n_hidden[i], activation=activation) self.hidden_layers.append(hd) self.params.extend(hd.params) # The logistic regression layer gets as input the hidden units # of the hidden layer if len(n_hidden)<=0: self.logRegressionLayer = LogisticRegression( input=self.input_layer.output, n_in=n_in, n_out=n_out) else: self.logRegressionLayer = LogisticRegression( input=self.hidden_layers[-1].output, n_in=n_hidden[-1], n_out=n_out) self.params.extend(self.logRegressionLayer.params) # regularization terms self.L1_input=T.abs_(self.input_layer.w).sum() self.L2_input=(self.input_layer.w **2).sum() self.hinge_loss_neg=(T.maximum(0,-self.input_layer.w)).sum() # penalize negative values self.hinge_loss_pos=(T.maximum(0,self.input_layer.w)).sum() # # penalize positive values L1s=[] L2_sqrs=[] #L1s.append(abs(self.hidden_layers[0].W).sum()) for i in range(len(n_hidden)): L1s.append (T.abs_(self.hidden_layers[i].W).sum()) L2_sqrs.append((self.hidden_layers[i].W ** 2).sum()) L1s.append(T.abs_(self.logRegressionLayer.W).sum()) L2_sqrs.append((self.logRegressionLayer.W ** 2).sum()) self.L1 = T.sum(L1s) self.L2_sqr = T.sum(L2_sqrs) # negative log likelihood of the MLP is given by the negative # log likelihood of the output of the model, computed in the # logistic regression layer self.negative_log_likelihood = self.logRegressionLayer.negative_log_likelihood # same holds for the function computing the number of errors self.errors = self.logRegressionLayer.errors(self.y) # lambda3=0.5 # self.cost = self.negative_log_likelihood(self.y) \ # + lambda1*(1.0-lambda2)*0.5*self.L2_input \ # + lambda1*lambda2*(1.0-lambda3)*self.hinge_loss_pos \ # + lambda1*lambda2*lambda3*self.hinge_loss_neg \ # + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1 self.cost = self.negative_log_likelihood(self.y) \ + lambda1*(1.0-lambda2)*0.5*self.L2_input \ + lambda1*lambda2*self.L1_input \ + alpha1*(1.0-alpha2)*0.5* self.L2_sqr + alpha1*alpha2 * self.L1 #self.cost = self.negative_log_likelihood(self.y) \ # + lambda1*(1.0-lambda2)*(0.5/n_in)*self.L2_input \ # + lambda1*lambda2*(1/n_in)*self.L1_input \ # + alpha1*(1.0-alpha2)*0.5 * self.L2_sqr + alpha1*alpha2 * self.L1 self.y_pred=self.logRegressionLayer.y_pred self.y_pred_prob=self.logRegressionLayer.y_pred_prob def build_train_function(self, train_set_x, train_set_y, batch_size, alpha, learning_rate_shared): """ Create a function to compute the mistakes that are made by the model. 
""" index = T.lscalar('index') # index to a [mini]batch # compute the gradients with respect to the model parameters grads = T.grad(self.cost, self.params) # add momentum # initialize the delta_i-1 delta_before=[] for param_i in self.params: delta_before_i=theano.shared(value=numpy.zeros(param_i.get_value().shape)) delta_before.append(delta_before_i) updates = [] for param_i, grad_i, delta_before_i in zip(self.params, grads, delta_before): delta_i=-learning_rate_shared * grad_i + alpha*delta_before_i updates.append((param_i, param_i + delta_i )) updates.append((delta_before_i,delta_i)) train_model_cost = theano.function([index], self.cost, updates=updates, givens={ self.x: train_set_x[index * batch_size: (index + 1) * batch_size], self.y: train_set_y[index * batch_size: (index + 1) * batch_size]}, name='train') return train_model_cost def build_valid_function(self,valid_set_x, valid_set_y, batch_size): """ Build symbolic validation function. """ n_valid_batches = int(math.ceil(valid_set_x.get_value(borrow=True).shape[0] / batch_size)) index = T.lscalar('index') # index to a [mini]batch valid_error_i = theano.function([index], self.errors, givens={self.x: valid_set_x[index * batch_size:(index + 1) * batch_size], self.y: valid_set_y[index * batch_size:(index + 1) * batch_size]}, name='valid') # Create a function that scans the entire validation set def valid_error(): return [valid_error_i(i) for i in xrange(n_valid_batches)] return valid_error def build_test_function(self, test_set_x, batch_size): """ Build symbolic test function. """ n_test_batches = int(math.ceil(test_set_x.get_value(borrow=True).shape[0] / batch_size)) index = T.lscalar('index') # index to a [mini]batch test_pred_i = theano.function([index], [self.y_pred,self.y_pred_prob], givens={self.x: test_set_x[index * batch_size : (index + 1) * batch_size]}, name='test') # Create a function that scans the entire test set def test_pred(): y_pred=[] y_pred_prob=[] for i in xrange(n_test_batches): label,prob=test_pred_i(i) y_pred.extend(label) y_pred_prob.extend(prob) return y_pred,y_pred_prob return test_pred def get_predicted(self,data): for i in range(len(self.hidden_layers)): data=self.hidden_layers[i].get_predicted(data) p_y_given_x = T.nnet.softmax(T.dot(data, self.logRegressionLayer.W) + self.logRegressionLayer.b) y_pred = T.argmax(p_y_given_x, axis=1) y_pred_prob = T.argmax(p_y_given_x, axis=1) return y_pred,y_pred_prob def get_params(self): return copy.deepcopy(self.params) def set_params(self, given_params): self.params=given_params def print_params(self): for param in self.params: print param.get_value(borrow=True) def save_params(self,filename): f=open(filename,'w') # remove existing file f.close() f=open(filename,'a') for param in self.params: pickle.dump(param.get_value(borrow=True),f) f.close() def read_params(filename): f=open(filename,'r') params=pickle.load(f) f.close() return params def train_model(train_set_x_org=None, train_set_y_org=None, valid_set_x_org=None, valid_set_y_org=None, learning_rate=0.1, alpha=0.01, lambda1=0.001, lambda2=1.0, alpha1=0.001, alpha2=0.0, n_hidden=[256,128,16], n_epochs=1000, batch_size=100, activation_func="tanh", rng=numpy.random.RandomState(100), max_num_epoch_change_learning_rate=100,max_num_epoch_change_rate=0.8,learning_rate_decay_rate=0.8): """ Train a deep feature selection model. INPUTS: train_set_x_org: numpy 2d array, each row is a training sample. train_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of training samples. 
valid_set_x_org: numpy 2d array, each row is a validation sample. This set is to monitor the convergence of optimization. valid_set_y_org: numpy vector of type int {0,1,...,C-1}, class labels of validation samples. learning_rate: float scalar, the initial learning rate. alpha: float, parameter to trade off the momentum term. lambda1: float scalar, control the sparsity of the input weights. The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ). Thus, the larger lambda1 is, the sparser the input weights are. lambda2: float scalar, control the smoothness of the input weights. The regularization term is lambda1( (1-lambda2)/2 * ||w||_2^2 + lambda2 * ||w||_1 ). Thus, the larger lambda2 is, the smoother the input weights are. alpha1: float scalar, control the sparsity of the weight matrices in MLP. The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ). Thus, the larger alpha1 is, the sparser the MLP weights are. alpha2: float scalar, control the smoothness of the weight matrices in MLP. The regularization term is alpha1( (1-alpha2)/2 * \sum||W_i||_2^2 + alpha2 \sum||W_i||_1 ). Thus, the larger alpha2 is, the smoother the MLP weights are. n_hidden, vector of int, n_hidden[i]: number of hidden units of the i-th layer. n_epochs: int scalar, the maximal number of epochs. batch_size: int scalar, minibatch size. activation_func: string, specify activation function. {"tanh" (default),"sigmoid"} rng: numpy random number state. OUTPUTS: classifier: object of MLP, the model learned, returned for testing. training_time: float, training time in seconds. """ train_set_x = theano.shared(numpy.asarray(train_set_x_org,dtype=theano.config.floatX),borrow=True) train_set_y = T.cast(theano.shared(numpy.asarray(train_set_y_org,dtype=theano.config.floatX),borrow=True),'int32') valid_set_x = theano.shared(numpy.asarray(valid_set_x_org,dtype=theano.config.floatX),borrow=True) valid_set_y = T.cast(theano.shared(numpy.asarray(valid_set_y_org,dtype=theano.config.floatX),borrow=True),'int32') # compute number of minibatches for training, validation and testing n_train_batches = int(math.ceil(train_set_x.get_value(borrow=True).shape[0] / batch_size)) # shared variable to reduce the learning rate learning_rate_shared=theano.shared(learning_rate,name='learn_rate_shared') decay_rate=T.scalar(name='decay_rate',dtype=theano.config.floatX) reduce_learning_rate=theano.function([decay_rate],learning_rate_shared,updates=[(learning_rate_shared,learning_rate_shared*decay_rate)]) ## define the model below num_feat=train_set_x.get_value(borrow=True).shape[1] # number of features n_cl=len(numpy.unique(train_set_y_org)) # number of classes activations={"tanh":T.tanh,"sigmoid":T.nnet.sigmoid,"relu":relu} activation=activations[activation_func] # build a MPL object classifier = DFS(rng=rng, n_in=num_feat, n_hidden=n_hidden, n_out=n_cl, lambda1=lambda1, lambda2=lambda2, alpha1=alpha1, alpha2=alpha2, activation=activation) train_model_one_iteration=classifier.build_train_function(train_set_x, train_set_y, batch_size, alpha, learning_rate_shared) validate_model=classifier.build_valid_function(valid_set_x, valid_set_y, batch_size) print '... 
training' # early-stopping parameters patience = 5000 # look as this many examples regardless patience_increase = 2 # wait this much longer when a new best is # found improvement_threshold = 0.995 # a relative improvement of this much is # considered significant validation_frequency = min(n_train_batches, patience / 2) # go through this many # minibatche before checking the network # on the validation set; in this case we # check every epoch best_validation_loss = numpy.inf #max_num_epoch_change_learning_rate=100 max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate #max_num_epoch_change_rate=0.8 #learning_rate_decay_rate=0.8 epoch_change_count=0 start_time = time.clock() done_looping = False epoch = 0 while (epoch < n_epochs) and (not done_looping): epoch = epoch + 1 epoch_change_count=epoch_change_count+1 if epoch_change_count % max_num_epoch_change_learning_rate ==0: reduce_learning_rate(learning_rate_decay_rate) max_num_epoch_change_learning_rate= \ cl.change_max_num_epoch_change_learning_rate(max_num_epoch_change_learning_rate,max_num_epoch_change_rate) max_num_epoch_not_improve=3*max_num_epoch_change_learning_rate epoch_change_count=0 for minibatch_index in xrange(n_train_batches): minibatch_avg_cost = train_model_one_iteration(minibatch_index) # iteration number iter = (epoch - 1) * n_train_batches + minibatch_index if (iter + 1) % validation_frequency == 0: # compute zero-one loss on validation set validation_losses = validate_model() this_validation_loss = numpy.mean(validation_losses) print('epoch %i, minibatch %i/%i, validation error %f %%' % \ (epoch, minibatch_index + 1, n_train_batches, this_validation_loss * 100.)) # if we got the best validation score until now if this_validation_loss < best_validation_loss: num_epoch_not_improve=0 if this_validation_loss < best_validation_loss: #improve patience if loss improvement is good enough if this_validation_loss < best_validation_loss * \ improvement_threshold: patience = max(patience, iter * patience_increase) best_validation_loss = this_validation_loss # save a copy of the currently best model parameter best_model_params=classifier.get_params() if patience <= iter: done_looping = True break if this_validation_loss >= best_validation_loss: num_epoch_not_improve=num_epoch_not_improve+1 if num_epoch_not_improve>=max_num_epoch_not_improve: done_looping = True break # set the best model parameters classifier.set_params(best_model_params) end_time = time.clock() training_time=end_time-start_time print 'Training time: %f' %(training_time/60) print 'Optimization complete with best validation score of %f,' %(best_validation_loss * 100.) return classifier, training_time def test_model(classifier, test_set_x_org, batch_size): """ Predict class labels of given data using the model learned. INPUTS: classifier_trained: object of DFS, the model learned by function "train_model". test_set_x_org: numpy 2d array, each row is a sample whose label to be predicted. batch_size: int scalar, batch size, efficient for a very large number of test samples. OUTPUTS: test_set_y_predicted: numpy int vector, the class labels predicted. test_set_y_predicted_prob: numpy float vector, the probabilities. test_time: test time in seconds. 
""" start_time=time.clock() test_set_x = theano.shared(numpy.asarray(test_set_x_org,dtype=theano.config.floatX),borrow=True) test_model_func=classifier.build_test_function(test_set_x, batch_size) test_set_y_predicted,test_set_y_predicted_prob=test_model_func() end_time=time.clock() test_time=end_time-start_time return test_set_y_predicted,test_set_y_predicted_prob,test_time
# Copyright 2016 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. from brainiak.fcma.classifier import Classifier from scipy.stats.mstats import zscore from sklearn import svm from sklearn.linear_model import LogisticRegression import numpy as np import math from numpy.random import RandomState from scipy.spatial.distance import hamming # specify the random state to fix the random numbers prng = RandomState(1234567890) def create_epoch(idx, num_voxels): row = 12 col = num_voxels mat = prng.rand(row, col).astype(np.float32) # impose a pattern to even epochs if idx % 2 == 0: mat = np.sort(mat, axis=0) mat = zscore(mat, axis=0, ddof=0) # if zscore fails (standard deviation is zero), # set all values to be zero mat = np.nan_to_num(mat) mat = mat / math.sqrt(mat.shape[0]) return mat def test_classification(): fake_raw_data = [create_epoch(i, 5) for i in range(20)] labels = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # 5 subjects, 4 epochs per subject epochs_per_subj = 4 # svm svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto') training_data = fake_raw_data[0:12] clf = Classifier(svm_clf, epochs_per_subj=epochs_per_subj) clf.fit(list(zip(training_data, training_data)), labels[0:12]) expected_confidence = np.array([-1.18234421, 0.97403604, -1.04005679, 0.92403019, -0.95567738, 1.11746593, -0.83275891, 0.9486868]) recomputed_confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(recomputed_confidence) ) * expected_confidence.size assert hamming_distance <= 1, \ 'decision function of SVM with recomputation ' \ 'does not provide correct results' y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data[12:]))) expected_output = [0, 0, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * len(y_pred) assert hamming_distance <= 1, \ 'classification via SVM does not provide correct results' confidence = clf.decision_function(list(zip(fake_raw_data[12:], fake_raw_data[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence) ) * confidence.size assert hamming_distance <= 1, \ 'decision function of SVM without recomputation ' \ 'does not provide correct results' y = [0, 1, 0, 1, 0, 1, 0, 1] score = clf.score(list(zip(fake_raw_data[12:], fake_raw_data[12:])), y) assert np.isclose([hamming(y_pred, y)], [1-score])[0], \ 'the prediction score is incorrect' # svm with partial similarity matrix computation clf = Classifier(svm_clf, num_processed_voxels=2, epochs_per_subj=epochs_per_subj) clf.fit(list(zip(fake_raw_data, fake_raw_data)), labels, num_training_samples=12) y_pred = clf.predict() expected_output = [0, 0, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * len(y_pred) assert hamming_distance <= 1, \ 'classification via SVM (partial sim) does not ' \ 'provide correct results' confidence = clf.decision_function() hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence)) * 
confidence.size assert hamming_distance <= 1, \ 'decision function of SVM (partial sim) without recomputation ' \ 'does not provide correct results' # logistic regression lr_clf = LogisticRegression() clf = Classifier(lr_clf, epochs_per_subj=epochs_per_subj) clf.fit(list(zip(training_data, training_data)), labels[0:12]) expected_confidence = np.array([-4.49666484, 3.73025553, -4.04181695, 3.73027436, -3.77043872, 4.42613412, -3.35616616, 3.77716609]) recomputed_confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(recomputed_confidence) ) * expected_confidence.size assert hamming_distance <= 1, \ 'decision function of logistic regression with recomputation ' \ 'does not provide correct results' y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data[12:]))) expected_output = [0, 0, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * len(y_pred) assert hamming_distance <= 1, \ 'classification via logistic regression ' \ 'does not provide correct results' confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence) ) * confidence.size assert hamming_distance <= 1, \ 'decision function of logistic regression without precomputation ' \ 'does not provide correct results' def test_classification_with_two_components(): fake_raw_data = [create_epoch(i, 5) for i in range(20)] fake_raw_data2 = [create_epoch(i, 6) for i in range(20)] labels = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1] # 5 subjects, 4 epochs per subject epochs_per_subj = 4 # svm svm_clf = svm.SVC(kernel='precomputed', shrinking=False, C=1, gamma='auto') training_data = fake_raw_data[0: 12] training_data2 = fake_raw_data2[0: 12] clf = Classifier(svm_clf, epochs_per_subj=epochs_per_subj) clf.fit(list(zip(training_data, training_data2)), labels[0:12]) expected_confidence = np.array([-1.23311606, 1.02440964, -0.93898336, 1.07028798, -1.04420007, 0.97647772, -1.0498268, 1.04970111]) recomputed_confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data2[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(recomputed_confidence) ) * expected_confidence.size assert hamming_distance <= 1, \ 'decision function of SVM with recomputation ' \ 'does not provide correct results' y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data2[12:]))) expected_output = [0, 1, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * len(y_pred) assert hamming_distance <= 1, \ 'classification via SVM does not provide correct results' confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data2[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence)) * confidence.size assert hamming_distance <= 1, \ 'decision function of SVM without recomputation ' \ 'does not provide correct results' y = [0, 1, 0, 1, 0, 1, 0, 1] score = clf.score(list(zip(fake_raw_data[12:], fake_raw_data2[12:])), y) assert np.isclose([hamming(y_pred, y)], [1-score])[0], \ 'the prediction score is incorrect' # svm with partial similarity matrix computation clf = Classifier(svm_clf, num_processed_voxels=2, epochs_per_subj=epochs_per_subj) clf.fit(list(zip(fake_raw_data, fake_raw_data2)), labels, num_training_samples=12) y_pred = clf.predict() expected_output = [0, 1, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * 
len(y_pred) assert hamming_distance <= 1, \ 'classification via SVM (partial sim) does not ' \ 'provide correct results' confidence = clf.decision_function() hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence)) * confidence.size assert hamming_distance <= 1, \ 'decision function of SVM (partial sim) without recomputation ' \ 'does not provide correct results' # logistic regression lr_clf = LogisticRegression() clf = Classifier(lr_clf, epochs_per_subj=epochs_per_subj) # specifying num_training_samples is for coverage clf.fit(list(zip(training_data, training_data2)), labels[0:12], num_training_samples=12) expected_confidence = np.array([-4.90819848, 4.22548132, -3.76255726, 4.46505975, -4.19933099, 4.08313584, -4.23070437, 4.31779758]) recomputed_confidence = clf.decision_function(list(zip( fake_raw_data[12:], fake_raw_data2[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(recomputed_confidence) ) * expected_confidence.size assert hamming_distance <= 1, \ 'decision function of logistic regression with recomputation ' \ 'does not provide correct results' y_pred = clf.predict(list(zip(fake_raw_data[12:], fake_raw_data2[12:]))) expected_output = [0, 1, 0, 1, 0, 1, 0, 1] hamming_distance = hamming(y_pred, expected_output) * len(y_pred) assert hamming_distance <= 1, \ 'classification via logistic regression ' \ 'does not provide correct results' confidence = clf.decision_function(list(zip(fake_raw_data[12:], fake_raw_data2[12:]))) hamming_distance = hamming(np.sign(expected_confidence), np.sign(confidence)) * confidence.size assert hamming_distance <= 1, \ 'decision function of logistic regression without precomputation ' \ 'does not provide correct results' if __name__ == '__main__': test_classification() test_classification_with_two_components()
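# --- Illustrative helper sketch (hypothetical, not part of the test module) ---
# The assertions above repeat one pattern: reduce decision values to their
# signs and scale the Hamming distance back to a raw mismatch count. A small
# helper expressing that pattern, with an assumed tolerance of one mismatch:
def signs_agree(expected_confidence, confidence, tolerance=1):
    mismatches = hamming(np.sign(expected_confidence),
                         np.sign(confidence)) * expected_confidence.size
    return mismatches <= tolerance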
#!/usr/bin/python # # azure_api.py - an Azure plugin for Vcycle # # THIS FILE NEEDS UPDATING FOR Vcycle 3.0 CHANGES! # # Andrew McNab, University of Manchester. # Luis Villazon Esteban, CERN. # Copyright (c) 2013-7. All rights reserved. # # Redistribution and use in source and binary forms, with or # without modification, are permitted provided that the following # conditions are met: # # o Redistributions of source code must retain the above # copyright notice, this list of conditions and the following # disclaimer. # o Redistributions in binary form must reproduce the above # copyright notice, this list of conditions and the following # disclaimer in the documentation and/or other materials # provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND # CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF # MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, # EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED # TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. # # Contacts: Andrew.McNab@cern.ch http://www.gridpp.ac.uk/vcycle/ # Luis.Villazon.Esteban@cern.ch # import pprint import os import sys import stat import time import json import shutil import string import pycurl import random import base64 import StringIO import tempfile import calendar from azure import * from azure.servicemanagement import * import vcycle.vacutils class AzureError(Exception): pass class AzureSpace(vcycle.BaseSpace): def __init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes): # Initialize data structures from configuration files # Generic initialization vcycle.BaseSpace.__init__(self, api, apiVersion, spaceName, parser, spaceSectionName, updatePipes) # Azure-specific initialization try: self.tenancy_name = parser.get(spaceSectionName, 'tenancy_name') except Exception as e: raise AzureError('tenancy_name is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.subscription = parser.get(spaceSectionName, 'subscription') except Exception as e: raise AzureError('subscription is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.certificate = parser.get(spaceSectionName, 'certificate') except Exception as e: raise AzureError('certificate is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.location = parser.get(spaceSectionName, 'location') except Exception as e: raise AzureError('location is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.pfx = parser.get(spaceSectionName, 'pfx') except Exception as e: raise AzureError('pfx is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.username = parser.get(spaceSectionName, 'username') except Exception as e: raise AzureError('username is required in Azure [space ' + spaceName + '] (' + str(e) + ')') try: self.password = parser.get(spaceSectionName, 'password') except Exception as e: raise AzureError('password is required in Azure [space ' + spaceName + '] (' + str(e) + ')') def connect(self): # Connect to 
the Azure service #Nothing to do pass def scanMachines(self): """Query Azure compute service for details of machines in this space""" # For each machine found in the space, this method is responsible for # either (a) ignorning non-Vcycle VMs but updating self.totalProcessors # or (b) creating a Machine object for the VM in self.spaces try: sms = ServiceManagementService(self.subscription, self.certificate) results = sms.list_hosted_services() except Exception as ex: if 'file' in str(ex): raise AzureError("No cert file , check the path.") raise AzureError(str(ex)) # Convert machines from None to an empty dictionary since we successfully connected self.machines = {} for result in results: try: info = sms.get_hosted_service_properties(result.service_name, True) except WindowsAzureMissingResourceError as ex: vcycle.vacutils.logLine("% don't have vms? " % result.service_name) continue if len(info.deployments) == 0 : continue if not result.service_name.startswith('vcycle-'): # Still count VMs that we didn't create and won't manage, to avoid going above space limit self.totalProcessors += 1 # FIXME: GET THE REAL NUMBER, NOT JUST 1 continue uuidStr = str(result.service_name) ip = '0.0.0.0' createdTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_created, "%Y-%m-%dT%H:%M:%SZ")) updatedTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_last_modified, "%Y-%m-%dT%H:%M:%SZ")) startedTime = calendar.timegm(time.strptime(result.hosted_service_properties.date_created, "%Y-%m-%dT%H:%M:%SZ")) machinetypeName = None try: status = info.deployments[0].role_instance_list[0].instance_status if status in ['Unknown', 'CreatingVM', 'StartingVM', 'CreatingRole', 'StartingRole', 'ReadyRole', 'BusyRole', 'Preparing','ProvisioningFailed']: state = vcycle.MachineState.starting elif status in ['StoppingRole', 'StoppingVM', 'DeletingVM', 'StoppedVM', 'RestartingRole','StoppedDeallocated']: state = vcycle.MachineState.deleting else: state = vcycle.MachineState.starting except Exception as ex: import json vcycle.vacutils.logLine(json.dumps(info,indent=2)) vcycle.vacutils.logLine(str(ex)) state = vcycle.MachineState.starting self.machines[result.service_name] = vcycle.Machine(name = result.service_name, spaceName = self.spaceName, state = state, ip = ip, createdTime = createdTime, startedTime = startedTime, updatedTime = updatedTime, uuidStr = uuidStr, machinetypeName = machinetypeName) def createMachine(self, machineName, machinetypeName, zone = None): try: self.__create_service(name=machineName, location=self.location) fingerprint, path = self.__add_certificate_to_service(name=machineName, pfx=self.pfx) self.__create_vm(name=machineName, flavor=self.machinetypes[machinetypeName].flavor_names[0], image=self.machinetypes[machinetypeName].root_image, username= self.username, password= self.password, user_data=base64.b64encode(open('/var/lib/vcycle/machines/' + machineName + '/user_data', 'r').read()), fingerprint=(fingerprint, path)) vcycle.vacutils.logLine('Created ' + machineName + ' (' + machineName + ') for ' + machinetypeName + ' within ' + self.spaceName) self.machines[machineName] = vcycle.shared.Machine(name = machineName, spaceName = self.spaceName, state = vcycle.MachineState.starting, ip = '0.0.0.0', createdTime = int(time.time()), startedTime = None, updatedTime = int(time.time()), uuidStr = None, machinetypeName = machinetypeName) except Exception as ex: try: self.__delete(machineName) raise AzureError(str(ex)) except Exception as ex: raise AzureError(str(ex)) def 
deleteOneMachine(self, machineName): sms = ServiceManagementService(self.subscription, self.certificate) try: sms.delete_hosted_service(machineName, True) except Exception as e: raise vcycle.shared.VcycleError('Cannot delete ' + machineName + ' (' + str(e) + ')') def __create_service(self, name="", location=None): """ Create a new service :param name: Name of the service :param location: Location of the service """ sms = ServiceManagementService(self.subscription, self.certificate) result = sms.check_hosted_service_name_availability(name) if not result: raise AzureError("The service name %s is not available" % name) try: result = sms.create_hosted_service(name, name, name, location) sms.wait_for_operation_status(result.request_id) except Exception as ex: raise AzureError("The service name %s is not available" % name) def __add_certificate_to_service(self, name="", pfx=""): """ Adds a certificate into the service. The certificate is used to connect via ssh to the VM :param name: Name of the service where the certificate will be added :param pfx: location on local disk of the certificate to upload """ import base64 sms = ServiceManagementService(self.subscription, self.certificate) result = sms.add_service_certificate(name, base64.b64encode(open(pfx).read()), 'pfx', '') sms.wait_for_operation_status(result.request_id) list = sms.list_service_certificates(name) for certificate in list: return certificate.thumbprint, certificate.certificate_url def __create_vm(self, name="", flavor="", image="", username="", password="", user_data=None, fingerprint=None): """ Creates new VM :param name: Name of the new VM :param flavor: Flavor to create the VM :param image: Image to create the VM :param username: username to use to connect to the vm via SSH :param password: password to use to connect to the vm via SSH :param user_data: contextualization file """ sms = ServiceManagementService(self.subscription, self.certificate) configuration_set = LinuxConfigurationSet(host_name=name, user_name=username, user_password=password, disable_ssh_password_authentication=False, custom_data=user_data) if fingerprint is not None: configuration_set.ssh.public_keys.public_keys.append(PublicKey(fingerprint=fingerprint[0], path=fingerprint[1])) network_set = ConfigurationSet() network_set.input_endpoints.input_endpoints.append(ConfigurationSetInputEndpoint(name='SSH', protocol="TCP", port=22, local_port=22)) result = sms.create_virtual_machine_deployment(name, name, 'production', name, name, configuration_set, None, network_config= network_set, role_size=flavor, vm_image_name=image, provision_guest_agent=True) def __delete(self, identifier): """Deletes a VM in the provider :param identifier: vm identifier """ sms = ServiceManagementService(self.subscription, self.certificate) try: sms.delete_hosted_service(identifier, True) except Exception as e: raise AzureError(str(e))
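# --- Illustrative configuration sketch (section name and values are placeholders) ---
# AzureSpace.__init__ above requires these options in its space section; a
# minimal ConfigParser setup showing the expected keys. Paths, credentials and
# the section name are assumptions for illustration, not real settings.
import ConfigParser

parser = ConfigParser.RawConfigParser()
parser.add_section('space example-azure')
for option, value in [('tenancy_name', 'example-tenancy'),
                      ('subscription', '00000000-0000-0000-0000-000000000000'),
                      ('certificate', '/var/lib/vcycle/azure.pem'),
                      ('location', 'West Europe'),
                      ('pfx', '/var/lib/vcycle/azure.pfx'),
                      ('username', 'cloudadm'),
                      ('password', 'changeme')]:
    parser.set('space example-azure', option, value)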
import os import subprocess from collections import OrderedDict import cothread from annotypes import Anno from malcolm import __version__ from malcolm.core import ( Alarm, AlarmSeverity, BadValueError, ProcessStartHook, ProcessStopHook, StringMeta, Widget, ) from malcolm.modules import builtin, ca from malcolm.modules.ca.util import catools from ..parts.dirparsepart import DirParsePart from ..parts.iociconpart import IocIconPart def await_ioc_start(stats, prefix): cothread.Yield() pid_rbv = catools.caget("%s:PID" % prefix, timeout=5) if int(pid_rbv) != os.getpid(): raise BadValueError( "Got back different PID: " + "is there another system instance on the machine?" ) catools.caput( "%s:YAML:PATH" % prefix, stats["yaml_path"], datatype=catools.DBR_CHAR_STR ) catools.caput( "%s:PYMALCOLM:PATH" % prefix, stats["pymalcolm_path"], datatype=catools.DBR_CHAR_STR, ) def start_ioc(stats, prefix): db_macros = "prefix='%s'" % prefix try: epics_base = os.environ["EPICS_BASE"] except KeyError: raise BadValueError("EPICS base not defined in environment") softIoc_bin = epics_base + "/bin/linux-x86_64/softIoc" for key, value in stats.items(): db_macros += ",%s='%s'" % (key, value) root = os.path.split(os.path.dirname(os.path.abspath(__file__)))[0] db_template = os.path.join(root, "db", "system.template") ioc = subprocess.Popen( [softIoc_bin, "-m", db_macros, "-d", db_template], stdout=subprocess.PIPE, stdin=subprocess.PIPE, ) cothread.Spawn(await_ioc_start, stats, prefix) return ioc with Anno("prefix for self.system PVs"): APvPrefix = str with Anno("space-separated list of IOCs to monitor"): AIocList = str class ProcessController(builtin.controllers.ManagerController): def __init__( self, mri: builtin.controllers.AMri, prefix: APvPrefix, config_dir: builtin.controllers.AConfigDir, ioc_list: AIocList = "", ) -> None: super().__init__(mri, config_dir) self.ioc = None self.ioc_blocks: OrderedDict = OrderedDict() self.prefix = prefix self.bl_iocs = ioc_list.split(" ") if self.bl_iocs[-1] == "": self.bl_iocs = self.bl_iocs[:-1] self.stats = dict() # TODO: the following stuff is all Linux-specific.... sys_call_bytes = ( open("/proc/%s/cmdline" % os.getpid(), "rb").read().split(b"\0") ) sys_call = [el.decode("utf-8") for el in sys_call_bytes] self.stats["pymalcolm_path"] = os.path.abspath(sys_call[1]) self.stats["yaml_path"] = os.path.abspath(sys_call[2]) self.stats["yaml_ver"] = self.parse_yaml_version( self.stats["yaml_path"], "/dls_sw/work", "/dls_sw/prod" ) self.stats["pymalcolm_ver"] = __version__ hostname = os.uname()[1] self.stats["kernel"] = "%s %s" % (os.uname()[0], os.uname()[2]) self.stats["hostname"] = ( hostname if len(hostname) < 39 else hostname[:35] + "..." 
) self.stats["pid"] = str(os.getpid()) self.pymalcolm_path = StringMeta( "Path to pymalcolm executable", tags=[Widget.MULTILINETEXTUPDATE.tag()] ).create_attribute_model(self.stats["pymalcolm_path"]) self.pymalcolm_ver = StringMeta( "Version of pymalcolm executable", tags=[Widget.TEXTUPDATE.tag()] ).create_attribute_model(self.stats["pymalcolm_ver"]) self.yaml_path = StringMeta( "Path to yaml configuration file", tags=[Widget.MULTILINETEXTUPDATE.tag()] ).create_attribute_model(self.stats["yaml_path"]) self.yaml_ver = StringMeta( "version of yaml configuration file", tags=[Widget.TEXTUPDATE.tag()] ).create_attribute_model(self.stats["yaml_ver"]) self.hostname = StringMeta( "Name of host machine", tags=[Widget.TEXTUPDATE.tag()] ).create_attribute_model(self.stats["hostname"]) self.kernel = StringMeta( "Kernel of host machine", tags=[Widget.TEXTUPDATE.tag()] ).create_attribute_model(self.stats["kernel"]) self.pid = StringMeta( "process ID of pymalcolm instance", tags=[Widget.TEXTUPDATE.tag()] ).create_attribute_model(self.stats["pid"]) self.field_registry.add_attribute_model("pymalcolmPath", self.pymalcolm_path) self.field_registry.add_attribute_model("pymalcolmVer", self.pymalcolm_ver) self.field_registry.add_attribute_model("yamlPath", self.yaml_path) self.field_registry.add_attribute_model("yamlVer", self.yaml_ver) self.field_registry.add_attribute_model("hostname", self.hostname) self.field_registry.add_attribute_model("kernel", self.kernel) self.field_registry.add_attribute_model("pid", self.pid) if self.stats["yaml_ver"] in ["work", "unknown"]: message = "Non-prod YAML config" alarm = Alarm(message=message, severity=AlarmSeverity.MINOR_ALARM) self.update_health("", builtin.infos.HealthInfo(alarm)) self.register_hooked(ProcessStartHook, self.init) self.register_hooked(ProcessStopHook, self.stop_ioc) def init(self): if self.ioc is None: self.ioc = start_ioc(self.stats, self.prefix) self.get_ioc_list() super().init() msg = ( """\ pymalcolm %(pymalcolm_ver)s started Path: %(pymalcolm_path)s Yaml: %(yaml_path)s""" % self.stats ) self._run_git_cmd("commit", "--allow-empty", "-m", msg) def set_default_layout(self): name = [] mri = [] x = [] y = [] visible = [] for part_name in self.parts.keys(): if isinstance(self.parts[part_name], builtin.parts.ChildPart): visible += [True] x += [0] y += [0] name += [part_name] mri += [self.parts[part_name].mri] self.set_layout(builtin.util.LayoutTable(name, mri, x, y, visible)) def stop_ioc(self): if self.ioc is not None: self.ioc.terminate() self.ioc = None def get_ioc_list(self): ioc_controllers = [] for ioc in self.bl_iocs: ioc_controller = make_ioc_status(ioc) ioc_controllers += [ioc_controller] self.process.add_controllers(ioc_controllers) for ioc in self.bl_iocs: self.add_part(builtin.parts.ChildPart(name=ioc, mri=ioc + ":STATUS")) def parse_yaml_version(self, file_path, work_area, prod_area): ver = "unknown" if file_path.startswith(work_area): ver = "work" elif file_path.startswith(prod_area): ver = self._run_git_cmd( "describe", "--tags", "--exact-match", cwd=os.path.split(file_path)[0] ) if ver is None: return "Prod (unknown version)" ver = ver.strip(b"\n").decode("utf-8") return ver def make_ioc_status(ioc): controller = builtin.controllers.StatefulController(ioc + ":STATUS") controller.add_part( ca.parts.CAStringPart( name="epicsVersion", description="EPICS version", rbv=(ioc + ":EPICS_VERS"), throw=False, ) ) controller.add_part( IocIconPart(ioc, (os.path.split(__file__)[0] + "/../icons/epics-logo.svg")) ) controller.add_part(DirParsePart(ioc, 
            ioc))
    controller.add_part(
        ca.parts.CAActionPart(
            "restartIoc",
            description="restart IOC via procServ",
            pv=(ioc + ":RESTART"),
            throw=False,
        )
    )
    return controller
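# --- Illustrative usage sketch (mri, prefix, paths and IOC names are assumptions) ---
# ProcessController above is constructed with an mri, the PV prefix used by
# start_ioc()/await_ioc_start(), a config directory and a space-separated IOC
# list; make_ioc_status() then builds one ":STATUS" block per listed IOC.
def make_process_controller():
    return ProcessController(
        mri="PROC",
        prefix="TEST-ML-MALC-01",
        config_dir="/tmp/malcolm-config",
        ioc_list="TEST-EA-IOC-01 TEST-EA-IOC-02",
    )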
# uncompyle6 version 2.9.10 # Python bytecode 2.7 (62211) # Decompiled from: Python 3.6.0b2 (default, Oct 11 2016, 05:27:10) # [GCC 6.2.0 20161005] # Embedded file name: IpAddr.py class IpAddr: IPADDR_TYPE_IPV4 = 0 IPADDR_TYPE_IPV6 = 1 def __init__(self): import array self.m_ipv4 = 0 self.m_ipv6 = array.array('B', (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)) self.m_type = self.IPADDR_TYPE_IPV4 self.m_ipv6_scope_id = 0 def __str__(self): return self.__repr__() def __repr__(self): if self.m_type == self.IPADDR_TYPE_IPV4: return '%u.%u.%u.%u' % (self.m_ipv4 >> 24 & 255, self.m_ipv4 >> 16 & 255, self.m_ipv4 >> 8 & 255, self.m_ipv4 & 255) else: hextets = [] i = 0 while i < 16: hextets.append('%x' % (self.m_ipv6[i] << 8 | self.m_ipv6[i + 1])) i = i + 2 if self.m_ipv6_scope_id != 0: return ':'.join(self._compress_hextets(hextets)) + '%%%u' % self.m_ipv6_scope_id return ':'.join(self._compress_hextets(hextets)) def __copy__(self): x = IpAddr() x.m_ipv4 = self.m_ipv4 x.m_ipv6 = self.m_ipv6 x.m_type = self.m_type x.m_ipv6_scope_id = self.m_ipv6_scope_id return x def __deepcopy__(self, memo): import array x = IpAddr() x.m_ipv4 = self.m_ipv4 x.m_ipv6 = array.array('B', self.m_ipv6) x.m_type = self.m_type x.m_ipv6_scope_id = self.m_ipv6_scope_id return x def CreateFromString(addrStr): obj = IpAddr() if addrStr.find(':') != -1: obj.SetAddr(IpAddr.IPADDR_TYPE_IPV6, _inet_pton6(addrStr)) elif addrStr.find('.') != -1: obj.SetAddr(IpAddr.IPADDR_TYPE_IPV4, _inet_pton4(addrStr)) else: raise RuntimeError('Invalid address string (%s)' % addrStr) return obj CreateFromString = staticmethod(CreateFromString) def GetType(self): return self.m_type def GetAddr(self): if self.m_type == self.IPADDR_TYPE_IPV4: return self.m_ipv4 else: return self.m_ipv6 def GetScopeId(self): return self.m_ipv6_scope_id def IsValid(self): if self.m_type != self.IPADDR_TYPE_IPV4 or self.m_ipv4 != 0: return True else: return False def SetAddr(self, type, addr): if type == self.IPADDR_TYPE_IPV4: if not isinstance(addr, (int, long)): raise RuntimeError("IPv4 address must be an 'int' or 'long'") self.m_ipv4 = addr self.m_type = self.IPADDR_TYPE_IPV4 elif type == self.IPADDR_TYPE_IPV6: import array if not isinstance(addr, array.array): raise RuntimeError("IPv6 address must be a 16 byte 'array'") if len(addr) != 16: raise RuntimeError("IPv6 address must be a 16 byte 'array'") self.m_ipv6 = array.array('B', addr) self.m_type = self.IPADDR_TYPE_IPV6 else: raise RuntimeError('Invalid address type specified') def SetScopeId(self, scope_id): self.m_ipv6_scope_id = scope_id def _compress_hextets(self, hextets): best_doublecolon_start = -1 best_doublecolon_len = 0 doublecolon_start = -1 doublecolon_len = 0 for index in range(len(hextets)): if hextets[index] == '0': doublecolon_len += 1 if doublecolon_start == -1: doublecolon_start = index if doublecolon_len > best_doublecolon_len: best_doublecolon_len = doublecolon_len best_doublecolon_start = doublecolon_start else: doublecolon_len = 0 doublecolon_start = -1 if best_doublecolon_len > 1: best_doublecolon_end = best_doublecolon_start + best_doublecolon_len if best_doublecolon_end == len(hextets): hextets += [''] hextets[best_doublecolon_start:best_doublecolon_end] = [ ''] if best_doublecolon_start == 0: hextets = [ ''] + hextets return hextets def _inet_pton4(addrStr): parts = addrStr.split('.') if len(parts) != 4: raise ValueError('Invalid IPv4 address string') addr = 0 for part in parts: val = int(part, 10) if val < 0 or val > 255: raise ValueError('Invalid IPv4 address string') addr = addr << 8 
| val return addr def _inet_pton6(addrStr): import array NS_INT16SZ = 2 NS_INADDRSZ = 4 NS_IN6ADDRSZ = 16 addrChars = list(addrStr) addr = array.array('B', [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) if addrChars[0] == ':' and addrChars[1] != ':': raise ValueError('Invalid IPv6 address string') if addrChars[0] == ':' and addrChars[1] == ':': addrChars = [ '0'] + addrChars colonReadIndex = -1 colonWriteIndex = -1 xdigits = '0123456789abcdefABCDEF' readIndex = 0 writeIndex = 0 saw_xdigit = False val = 0 for ch in addrChars: if xdigits.find(ch) != -1: val = val << 4 val = val | int(ch, 16) if val > 65535: raise ValueError('Invalid IPv6 address string') saw_xdigit = True readIndex = readIndex + 1 continue if ch == ':': colonReadIndex = readIndex + 1 if not saw_xdigit: if colonWriteIndex != -1: raise ValueError('Invalid IPv6 address string') colonWriteIndex = writeIndex readIndex = readIndex + 1 continue if writeIndex + NS_INT16SZ > 16: raise ValueError('Invalid IPv6 address string') addr[writeIndex] = val >> 8 & 255 writeIndex = writeIndex + 1 addr[writeIndex] = val & 255 writeIndex = writeIndex + 1 saw_xdigit = False val = 0 readIndex = readIndex + 1 continue if ch == '.' and writeIndex + NS_INADDRSZ <= 16: try: addrChars[0:colonReadIndex] = [] ipv4Str = ''.join(addrChars) ipv4Addr = _inet_pton4(ipv4Str) addr[writeIndex] = ipv4Addr >> 24 & 255 writeIndex = writeIndex + 1 addr[writeIndex] = ipv4Addr >> 16 & 255 writeIndex = writeIndex + 1 addr[writeIndex] = ipv4Addr >> 8 & 255 writeIndex = writeIndex + 1 addr[writeIndex] = ipv4Addr & 255 writeIndex = writeIndex + 1 saw_xdigit = False break except: raise raise ValueError('Invalid IPv6 address string') if saw_xdigit: if writeIndex + NS_INT16SZ > 16: raise ValueError('Invalid IPv6 address string') addr[writeIndex] = val >> 8 & 255 writeIndex = writeIndex + 1 addr[writeIndex] = val & 255 writeIndex = writeIndex + 1 if colonWriteIndex != -1: n = writeIndex - colonWriteIndex i = 1 while i <= n: addr[16 - i] = addr[colonWriteIndex + n - i] addr[colonWriteIndex + n - i] = 0 i = i + 1 writeIndex = 16 if writeIndex != 16: raise ValueError('Invalid IPv6 address string') return addr
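# --- Illustrative usage sketch (addresses are arbitrary examples) ---
# CreateFromString() dispatches on ':' vs '.' to parse IPv6 or IPv4, and
# __repr__() renders IPv4 dotted quads or compressed IPv6 hextets, with an
# optional "%<scope>" suffix when a scope id has been set.
v4 = IpAddr.CreateFromString('192.168.0.1')
v6 = IpAddr.CreateFromString('fe80::1')
v6.SetScopeId(2)
print v4                                        # -> 192.168.0.1
print v6                                        # -> fe80::1%2
print v4.GetType() == IpAddr.IPADDR_TYPE_IPV4   # -> True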
""" Support for firewalld. .. versionadded:: 2015.2.0 """ import logging import re import salt.utils.path from salt.exceptions import CommandExecutionError log = logging.getLogger(__name__) def __virtual__(): """ Check to see if firewall-cmd exists """ if salt.utils.path.which("firewall-cmd"): return True return ( False, "The firewalld execution module cannot be loaded: the firewall-cmd binary is" " not in the path.", ) def __firewall_cmd(cmd): """ Return the firewall-cmd location """ firewall_cmd = "{} {}".format(salt.utils.path.which("firewall-cmd"), cmd) out = __salt__["cmd.run_all"](firewall_cmd) if out["retcode"] != 0: if not out["stderr"]: msg = out["stdout"] else: msg = out["stderr"] raise CommandExecutionError("firewall-cmd failed: {}".format(msg)) return out["stdout"] def __mgmt(name, _type, action): """ Perform zone management """ # It's permanent because the 4 concerned functions need the permanent option, it's wrong without cmd = "--{}-{}={} --permanent".format(action, _type, name) return __firewall_cmd(cmd) def __parse_zone(cmd): """ Return zone information in a dictionary """ _zone = {} id_ = "" for i in __firewall_cmd(cmd).splitlines(): if i.strip(): if re.match("^[a-z0-9]", i, re.I): zone_name = i.rstrip() else: if i.startswith("\t"): _zone[zone_name][id_].append(i.strip()) continue (id_, val) = i.split(":", 1) id_ = id_.strip() if _zone.get(zone_name, None): _zone[zone_name].update({id_: [val.strip()]}) else: _zone[zone_name] = {id_: [val.strip()]} return _zone def version(): """ Return version from firewall-cmd CLI Example: .. code-block:: bash salt '*' firewalld.version """ return __firewall_cmd("--version") def reload_rules(): """ Reload the firewall rules, which makes the permanent configuration the new runtime configuration without losing state information. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.reload_rules """ return __firewall_cmd("--reload") def default_zone(): """ Print default zone for connections and interfaces CLI Example: .. code-block:: bash salt '*' firewalld.default_zone """ return __firewall_cmd("--get-default-zone") def list_zones(permanent=True): """ List everything added for or enabled in all zones CLI Example: .. code-block:: bash salt '*' firewalld.list_zones """ cmd = "--list-all-zones" if permanent: cmd += " --permanent" return __parse_zone(cmd) def get_zones(permanent=True): """ Print predefined zones CLI Example: .. code-block:: bash salt '*' firewalld.get_zones """ cmd = "--get-zones" if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def get_services(permanent=True): """ Print predefined services CLI Example: .. code-block:: bash salt '*' firewalld.get_services """ cmd = "--get-services" if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def get_icmp_types(permanent=True): """ Print predefined icmptypes CLI Example: .. code-block:: bash salt '*' firewalld.get_icmp_types """ cmd = "--get-icmptypes" if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def new_zone(zone, restart=True): """ Add a new zone CLI Example: .. code-block:: bash salt '*' firewalld.new_zone my_zone By default firewalld will be reloaded. However, to avoid reloading you need to specify the restart as False .. code-block:: bash salt '*' firewalld.new_zone my_zone False """ out = __mgmt(zone, "zone", "new") if restart: if out == "success": return __firewall_cmd("--reload") return out def delete_zone(zone, restart=True): """ Delete an existing zone CLI Example: .. 
code-block:: bash salt '*' firewalld.delete_zone my_zone By default firewalld will be reloaded. However, to avoid reloading you need to specify the restart as False .. code-block:: bash salt '*' firewalld.delete_zone my_zone False """ out = __mgmt(zone, "zone", "delete") if restart: if out == "success": return __firewall_cmd("--reload") return out def set_default_zone(zone): """ Set default zone CLI Example: .. code-block:: bash salt '*' firewalld.set_default_zone damian """ return __firewall_cmd("--set-default-zone={}".format(zone)) def new_service(name, restart=True): """ Add a new service CLI Example: .. code-block:: bash salt '*' firewalld.new_service my_service By default firewalld will be reloaded. However, to avoid reloading you need to specify the restart as False .. code-block:: bash salt '*' firewalld.new_service my_service False """ out = __mgmt(name, "service", "new") if restart: if out == "success": return __firewall_cmd("--reload") return out def delete_service(name, restart=True): """ Delete an existing service CLI Example: .. code-block:: bash salt '*' firewalld.delete_service my_service By default firewalld will be reloaded. However, to avoid reloading you need to specify the restart as False .. code-block:: bash salt '*' firewalld.delete_service my_service False """ out = __mgmt(name, "service", "delete") if restart: if out == "success": return __firewall_cmd("--reload") return out def list_all(zone=None, permanent=True): """ List everything added for or enabled in a zone CLI Example: .. code-block:: bash salt '*' firewalld.list_all List a specific zone .. code-block:: bash salt '*' firewalld.list_all my_zone """ if zone: cmd = "--zone={} --list-all".format(zone) else: cmd = "--list-all" if permanent: cmd += " --permanent" return __parse_zone(cmd) def list_services(zone=None, permanent=True): """ List services added for zone as a space separated list. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.list_services List a specific zone .. code-block:: bash salt '*' firewalld.list_services my_zone """ if zone: cmd = "--zone={} --list-services".format(zone) else: cmd = "--list-services" if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def add_service(service, zone=None, permanent=True): """ Add a service for zone. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.add_service ssh To assign a service to a specific zone: .. code-block:: bash salt '*' firewalld.add_service ssh my_zone """ if zone: cmd = "--zone={} --add-service={}".format(zone, service) else: cmd = "--add-service={}".format(service) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_service(service, zone=None, permanent=True): """ Remove a service from zone. This option can be specified multiple times. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.remove_service ssh To remove a service from a specific zone .. code-block:: bash salt '*' firewalld.remove_service ssh dmz """ if zone: cmd = "--zone={} --remove-service={}".format(zone, service) else: cmd = "--remove-service={}".format(service) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def add_service_port(service, port): """ Add a new port to the specified service. .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' firewalld.add_service_port zone 80 """ if service not in get_services(permanent=True): raise CommandExecutionError("The service does not exist.") cmd = "--permanent --service={} --add-port={}".format(service, port) return __firewall_cmd(cmd) def remove_service_port(service, port): """ Remove a port from the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_service_port zone 80 """ if service not in get_services(permanent=True): raise CommandExecutionError("The service does not exist.") cmd = "--permanent --service={} --remove-port={}".format(service, port) return __firewall_cmd(cmd) def get_service_ports(service): """ List ports of a service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_service_ports zone """ cmd = "--permanent --service={} --get-ports".format(service) return __firewall_cmd(cmd).split() def add_service_protocol(service, protocol): """ Add a new protocol to the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_service_protocol zone ssh """ cmd = "--permanent --service={} --add-protocol={}".format(service, protocol) return __firewall_cmd(cmd) def remove_service_protocol(service, protocol): """ Remove a protocol from the specified service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_service_protocol zone ssh """ cmd = "--permanent --service={} --remove-protocol={}".format(service, protocol) return __firewall_cmd(cmd) def get_service_protocols(service): """ List protocols of a service. .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_service_protocols zone """ cmd = "--permanent --service={} --get-protocols".format(service) return __firewall_cmd(cmd).split() def get_masquerade(zone=None, permanent=True): """ Show if masquerading is enabled on a zone. If zone is omitted, default zone will be used. CLI Example: .. code-block:: bash salt '*' firewalld.get_masquerade zone """ zone_info = list_all(zone, permanent) if "no" in [zone_info[i]["masquerade"][0] for i in zone_info]: return False return True def add_masquerade(zone=None, permanent=True): """ Enable masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_masquerade To enable masquerade on a specific zone .. code-block:: bash salt '*' firewalld.add_masquerade dmz """ if zone: cmd = "--zone={} --add-masquerade".format(zone) else: cmd = "--add-masquerade" if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_masquerade(zone=None, permanent=True): """ Remove masquerade on a zone. If zone is omitted, default zone will be used. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_masquerade To remove masquerade on a specific zone .. code-block:: bash salt '*' firewalld.remove_masquerade dmz """ if zone: cmd = "--zone={} --remove-masquerade".format(zone) else: cmd = "--remove-masquerade" if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def add_port(zone, port, permanent=True, force_masquerade=False): """ Allow specific ports in a zone. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_port internal 443/tcp force_masquerade when a zone is created ensure masquerade is also enabled on that zone. 
""" if force_masquerade and not get_masquerade(zone): add_masquerade(zone) cmd = "--zone={} --add-port={}".format(zone, port) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_port(zone, port, permanent=True): """ Remove a specific port from a zone. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_port internal 443/tcp """ cmd = "--zone={} --remove-port={}".format(zone, port) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def list_ports(zone, permanent=True): """ List all ports in a zone. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.list_ports """ cmd = "--zone={} --list-ports".format(zone) if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def add_port_fwd( zone, src, dest, proto="tcp", dstaddr="", permanent=True, force_masquerade=False ): """ Add port forwarding. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_port_fwd public 80 443 tcp force_masquerade when a zone is created ensure masquerade is also enabled on that zone. """ if force_masquerade and not get_masquerade(zone): add_masquerade(zone) cmd = "--zone={} --add-forward-port=port={}:proto={}:toport={}:toaddr={}".format( zone, src, proto, dest, dstaddr ) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_port_fwd(zone, src, dest, proto="tcp", dstaddr="", permanent=True): """ Remove Port Forwarding. .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_port_fwd public 80 443 tcp """ cmd = "--zone={} --remove-forward-port=port={}:proto={}:toport={}:toaddr={}".format( zone, src, proto, dest, dstaddr ) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def list_port_fwd(zone, permanent=True): """ List port forwarding .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.list_port_fwd public """ ret = [] cmd = "--zone={} --list-forward-ports".format(zone) if permanent: cmd += " --permanent" for i in __firewall_cmd(cmd).splitlines(): (src, proto, dest, addr) = i.split(":") ret.append( { "Source port": src.split("=")[1], "Protocol": proto.split("=")[1], "Destination port": dest.split("=")[1], "Destination address": addr.split("=")[1], } ) return ret def block_icmp(zone, icmp, permanent=True): """ Block a specific ICMP type on a zone .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.block_icmp zone echo-reply """ if icmp not in get_icmp_types(permanent): log.error("Invalid ICMP type") return False if icmp in list_icmp_block(zone, permanent): log.info("ICMP block already exists") return "success" cmd = "--zone={} --add-icmp-block={}".format(zone, icmp) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def allow_icmp(zone, icmp, permanent=True): """ Allow a specific ICMP type on a zone .. versionadded:: 2015.8.0 CLI Example: .. code-block:: bash salt '*' firewalld.allow_icmp zone echo-reply """ if icmp not in get_icmp_types(permanent): log.error("Invalid ICMP type") return False if icmp not in list_icmp_block(zone, permanent): log.info("ICMP Type is already permitted") return "success" cmd = "--zone={} --remove-icmp-block={}".format(zone, icmp) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def list_icmp_block(zone, permanent=True): """ List ICMP blocks on a zone .. versionadded:: 2015.8.0 CLI Example: .. 
code-block:: bash salt '*' firewlld.list_icmp_block zone """ cmd = "--zone={} --list-icmp-blocks".format(zone) if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def make_permanent(): """ Make current runtime configuration permanent. .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.make_permanent """ return __firewall_cmd("--runtime-to-permanent") def get_interfaces(zone, permanent=True): """ List interfaces bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_interfaces zone """ cmd = "--zone={} --list-interfaces".format(zone) if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def add_interface(zone, interface, permanent=True): """ Bind an interface to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_interface zone eth0 """ if interface in get_interfaces(zone, permanent): log.info("Interface is already bound to zone.") cmd = "--zone={} --add-interface={}".format(zone, interface) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_interface(zone, interface, permanent=True): """ Remove an interface bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_interface zone eth0 """ if interface not in get_interfaces(zone, permanent): log.info("Interface is not bound to zone.") cmd = "--zone={} --remove-interface={}".format(zone, interface) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def get_sources(zone, permanent=True): """ List sources bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_sources zone """ cmd = "--zone={} --list-sources".format(zone) if permanent: cmd += " --permanent" return __firewall_cmd(cmd).split() def add_source(zone, source, permanent=True): """ Bind a source to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_source zone 192.168.1.0/24 """ if source in get_sources(zone, permanent): log.info("Source is already bound to zone.") cmd = "--zone={} --add-source={}".format(zone, source) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_source(zone, source, permanent=True): """ Remove a source bound to a zone .. versionadded:: 2016.3.0 CLI Example: .. code-block:: bash salt '*' firewalld.remove_source zone 192.168.1.0/24 """ if source not in get_sources(zone, permanent): log.info("Source is not bound to zone.") cmd = "--zone={} --remove-source={}".format(zone, source) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def get_rich_rules(zone, permanent=True): """ List rich rules bound to a zone .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.get_rich_rules zone """ cmd = "--zone={} --list-rich-rules".format(zone) if permanent: cmd += " --permanent" return __firewall_cmd(cmd).splitlines() def add_rich_rule(zone, rule, permanent=True): """ Add a rich rule to a zone .. versionadded:: 2016.11.0 CLI Example: .. code-block:: bash salt '*' firewalld.add_rich_rule zone 'rule' """ cmd = "--zone={} --add-rich-rule='{}'".format(zone, rule) if permanent: cmd += " --permanent" return __firewall_cmd(cmd) def remove_rich_rule(zone, rule, permanent=True): """ Add a rich rule to a zone .. versionadded:: 2016.11.0 CLI Example: .. 
code-block:: bash salt '*' firewalld.remove_rich_rule zone 'rule' """ cmd = "--zone={} --remove-rich-rule='{}'".format(zone, rule) if permanent: cmd += " --permanent" return __firewall_cmd(cmd)
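# A hedged sketch of driving this execution module from Python through Salt's
# local client (requires a running master and minion; the target 'minion1'
# and the zone/port values are illustrative only). Note that changes made
# with permanent=True only become active after reload_rules().
if __name__ == '__main__':
    import salt.client

    local = salt.client.LocalClient()
    local.cmd('minion1', 'firewalld.add_port', ['public', '443/tcp'])
    local.cmd('minion1', 'firewalld.reload_rules')
    # list_ports returns something like {'minion1': ['443/tcp']}
    print(local.cmd('minion1', 'firewalld.list_ports', ['public']))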
from __future__ import absolute_import from __future__ import print_function from __future__ import unicode_literals import unicodedata import datetime import re import time from pytz import UTC from .enums import ResourceType from .permissions import Permissions EPOCH_DT = datetime.datetime.fromtimestamp(0, UTC) RE_LINUX = re.compile( r""" ^ ([-dlpscbD]) ([r-][w-][xsS-][r-][w-][xsS-][r-][w-][xtT-][\.\+]?) \s+? (\d+) \s+? ([A-Za-z0-9][A-Za-z0-9\-\.\_\@]*\$?) \s+? ([A-Za-z0-9][A-Za-z0-9\-\.\_\@]*\$?) \s+? (\d+) \s+? (\w{3}\s+\d{1,2}\s+[\w:]+) \s+ (.*?) $ """, re.VERBOSE, ) RE_WINDOWSNT = re.compile( r""" ^ (?P<modified_date>\S+) \s+ (?P<modified_time>\S+(AM|PM)?) \s+ (?P<size>(<DIR>|\d+)) \s+ (?P<name>.*) $ """, re.VERBOSE, ) def get_decoders(): """Return all available FTP LIST line decoders with their matching regexes.""" decoders = [ (RE_LINUX, decode_linux), (RE_WINDOWSNT, decode_windowsnt), ] return decoders def parse(lines): info = [] for line in lines: if not line.strip(): continue raw_info = parse_line(line) if raw_info is not None: info.append(raw_info) return info def parse_line(line): for line_re, decode_callable in get_decoders(): match = line_re.match(line) if match is not None: return decode_callable(line, match) return None def _parse_time(t, formats): for frmt in formats: try: _t = time.strptime(t, frmt) break except ValueError: continue else: return None year = _t.tm_year if _t.tm_year != 1900 else time.localtime().tm_year month = _t.tm_mon day = _t.tm_mday hour = _t.tm_hour minutes = _t.tm_min dt = datetime.datetime(year, month, day, hour, minutes, tzinfo=UTC) epoch_time = (dt - EPOCH_DT).total_seconds() return epoch_time def _decode_linux_time(mtime): return _parse_time(mtime, formats=["%b %d %Y", "%b %d %H:%M"]) def decode_linux(line, match): ty, perms, links, uid, gid, size, mtime, name = match.groups() is_link = ty == "l" is_dir = ty == "d" or is_link if is_link: name, _, _link_name = name.partition("->") name = name.strip() _link_name = _link_name.strip() permissions = Permissions.parse(perms) mtime_epoch = _decode_linux_time(mtime) name = unicodedata.normalize("NFC", name) raw_info = { "basic": {"name": name, "is_dir": is_dir}, "details": { "size": int(size), "type": int(ResourceType.directory if is_dir else ResourceType.file), }, "access": {"permissions": permissions.dump()}, "ftp": {"ls": line}, } access = raw_info["access"] details = raw_info["details"] if mtime_epoch is not None: details["modified"] = mtime_epoch access["user"] = uid access["group"] = gid return raw_info def _decode_windowsnt_time(mtime): return _parse_time(mtime, formats=["%d-%m-%y %I:%M%p", "%d-%m-%y %H:%M"]) def decode_windowsnt(line, match): """Decode a Windows NT FTP LIST line. 
Examples: Decode a directory line:: >>> line = "11-02-18 02:12PM <DIR> images" >>> match = RE_WINDOWSNT.match(line) >>> pprint(decode_windowsnt(line, match)) {'basic': {'is_dir': True, 'name': 'images'}, 'details': {'modified': 1518358320.0, 'type': 1}, 'ftp': {'ls': '11-02-18 02:12PM <DIR> images'}} Decode a file line:: >>> line = "11-02-18 03:33PM 9276 logo.gif" >>> match = RE_WINDOWSNT.match(line) >>> pprint(decode_windowsnt(line, match)) {'basic': {'is_dir': False, 'name': 'logo.gif'}, 'details': {'modified': 1518363180.0, 'size': 9276, 'type': 2}, 'ftp': {'ls': '11-02-18 03:33PM 9276 logo.gif'}} Alternatively, the time might also be present in 24-hour format:: >>> line = "11-02-18 15:33 9276 logo.gif" >>> match = RE_WINDOWSNT.match(line) >>> decode_windowsnt(line, match)["details"]["modified"] 1518363180.0 """ is_dir = match.group("size") == "<DIR>" raw_info = { "basic": { "name": match.group("name"), "is_dir": is_dir, }, "details": { "type": int(ResourceType.directory if is_dir else ResourceType.file), }, "ftp": {"ls": line}, } if not is_dir: raw_info["details"]["size"] = int(match.group("size")) modified = _decode_windowsnt_time( match.group("modified_date") + " " + match.group("modified_time") ) if modified is not None: raw_info["details"]["modified"] = modified return raw_info
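# A minimal usage sketch for the decoders above (assumes the names defined in
# this module are in scope; the sample LIST lines are illustrative only).
if __name__ == '__main__':
    from pprint import pprint

    sample = [
        "drwxr-xr-x   2 ftp      ftp          4096 Mar 13 09:30 pub",
        "11-02-18 03:33PM                 9276 logo.gif",
        "",  # blank lines are skipped by parse()
    ]
    # Each recognised line yields a raw_info dict with 'basic', 'details',
    # 'access' (Linux style only) and 'ftp' sections.
    pprint(parse(sample))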
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Normalization preprocessing layer.""" # pylint: disable=g-classes-have-attributes from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy as np from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.framework import tensor_shape from tensorflow.python.keras import backend as K from tensorflow.python.keras.engine import base_preprocessing_layer from tensorflow.python.ops import array_ops from tensorflow.python.ops import init_ops from tensorflow.python.ops import math_ops from tensorflow.python.ops import nn_impl from tensorflow.python.ops import variables from tensorflow.python.util.tf_export import keras_export @keras_export('keras.layers.experimental.preprocessing.Normalization') class Normalization(base_preprocessing_layer.PreprocessingLayer): """Feature-wise normalization of the data. This layer will coerce its inputs into a distribution centered around 0 with standard deviation 1. It accomplishes this by precomputing the mean and variance of the data, and calling (input-mean)/sqrt(var) at runtime. What happens in `adapt`: Compute mean and variance of the data and store them as the layer's weights. `adapt` should be called before `fit`, `evaluate`, or `predict`. Args: axis: Integer or tuple of integers, the axis or axes that should be "kept". These axes are not be summed over when calculating the normalization statistics. By default the last axis, the `features` axis is kept and any `space` or `time` axes are summed. Each element in the the axes that are kept is normalized independently. If `axis` is set to 'None', the layer will perform scalar normalization (dividing the input by a single scalar value). The `batch` axis, 0, is always summed over (`axis=0` is not allowed). mean: The mean value(s) to use during normalization. The passed value(s) will be broadcast to the shape of the kept axes above; if the value(s) cannot be broadcast, an error will be raised when this layer's build() method is called. variance: The variance value(s) to use during normalization. The passed value(s) will be broadcast to the shape of the kept axes above; if the value(s)cannot be broadcast, an error will be raised when this layer's build() method is called. Examples: Calculate the mean and variance by analyzing the dataset in `adapt`. >>> adapt_data = np.array([[1.], [2.], [3.], [4.], [5.]], dtype=np.float32) >>> input_data = np.array([[1.], [2.], [3.]], np.float32) >>> layer = Normalization() >>> layer.adapt(adapt_data) >>> layer(input_data) <tf.Tensor: shape=(3, 1), dtype=float32, numpy= array([[-1.4142135 ], [-0.70710677], [ 0. ]], dtype=float32)> Pass the mean and variance directly. >>> input_data = np.array([[1.], [2.], [3.]], np.float32) >>> layer = Normalization(mean=3., variance=2.) 
>>> layer(input_data) <tf.Tensor: shape=(3, 1), dtype=float32, numpy= array([[-1.4142135 ], [-0.70710677], [ 0. ]], dtype=float32)> """ def __init__(self, axis=-1, mean=None, variance=None, **kwargs): super(Normalization, self).__init__(stateful=True, streaming=True, **kwargs) base_preprocessing_layer.keras_kpl_gauge.get_cell('Normalization').set(True) # Standardize `axis` to a tuple. if axis is None: axis = () elif isinstance(axis, int): axis = (axis,) else: axis = tuple(axis) if 0 in axis: raise ValueError('The argument \'axis\' may not be 0.') self.axis = axis # Set `mean` and `variance` if passed. if isinstance(mean, variables.Variable): raise ValueError('Normalization does not support passing a Variable ' 'for the `mean` init arg.') if isinstance(variance, variables.Variable): raise ValueError('Normalization does not support passing a Variable ' 'for the `variance` init arg.') if mean is not None and variance is not None: mean = convert_to_ndarray(mean) variance = convert_to_ndarray(variance) elif mean is not None or variance is not None: raise ValueError( 'When setting values directly, both `mean` and `variance` ' 'must be set. Got mean: {} and variance: {}'.format(mean, variance)) self.mean_val = mean self.variance_val = variance def build(self, input_shape): input_shape = tensor_shape.TensorShape(input_shape).as_list() if len(input_shape) == 1: input_shape = input_shape + [1] ndim = len(input_shape) if any(a < 1 - ndim or a >= ndim for a in self.axis): raise ValueError('All `axis` values must be in the range ' '[1 - ndim, ndim - 1]. Found ' 'ndim: `{}`, axis: {}'.format(ndim, self.axis)) # Axes to be kept, replacing negative values with positive equivalents. # Sorted to avoid transposing axes. self._keep_axis = sorted([d if d >= 0 else d + ndim for d in self.axis]) # Axes to be reduced. self._reduce_axis = [d for d in range(ndim) if d not in self._keep_axis] # 1 if an axis should be reduced, 0 otherwise. self._reduce_axis_mask = [ 0 if d in self._keep_axis else 1 for d in range(ndim) ] # Broadcast any reduced axes. self._broadcast_shape = [ input_shape[d] if d in self._keep_axis else 1 for d in range(ndim) ] # Create variables without keeping reduced axes. 
mean_and_var_shape = tuple(input_shape[d] for d in self._keep_axis) self.mean = self.add_weight( name='mean', shape=mean_and_var_shape, dtype=self.dtype, initializer=init_ops.zeros_initializer, trainable=False) self.variance = self.add_weight( name='variance', shape=mean_and_var_shape, dtype=self.dtype, initializer=init_ops.ones_initializer, trainable=False) self.count = self.add_weight( name='count', shape=(), dtype=dtypes.int64, initializer=init_ops.zeros_initializer, trainable=False) super(Normalization, self).build(input_shape) if (self.mean_val is not None and self.variance_val is not None): mean_val = self.mean_val * np.ones(mean_and_var_shape) variance_val = self.variance_val * np.ones(mean_and_var_shape) self.set_weights([mean_val, variance_val]) self.built = True def update_state(self, data): if not self.built: raise RuntimeError('`build` must be called before `update_state`.') data = self._standardize_inputs(data) batch_mean, batch_variance = nn_impl.moments_v2( data, axes=self._reduce_axis) batch_shape = array_ops.shape(data, out_type=self.count.dtype) batch_reduce_shape = array_ops.gather(batch_shape, self._reduce_axis) batch_count = math_ops.reduce_prod(batch_reduce_shape) total_count = batch_count + self.count batch_weight = ( math_ops.cast(batch_count, dtype=self.dtype) / math_ops.cast(total_count, dtype=self.dtype)) existing_weight = 1. - batch_weight total_mean = self.mean * existing_weight + batch_mean * batch_weight # The variance is computed using the lack-of-fit sum of squares # formula (see https://en.wikipedia.org/wiki/Lack-of-fit_sum_of_squares). total_variance = ((self.variance + (self.mean - total_mean)**2) * existing_weight + (batch_variance + (batch_mean - total_mean)**2) * batch_weight) self.mean.assign(total_mean) self.variance.assign(total_variance) self.count.assign(total_count) def merge_state(self, layers): layers = layers + [self] if any(not l.built for l in layers): raise ValueError( 'All layers to be merged must have been adapted to some inputs ' 'first (otherwise they have no state).') layer_counts = [l.count for l in layers] layer_means = [l.mean for l in layers] layer_variances = [l.variance for l in layers] total_count = math_ops.reduce_sum(layer_counts) layer_weightings = ( math_ops.cast(layer_counts, self.dtype) / math_ops.cast(total_count, self.dtype)) layer_weightings = array_ops.reshape( layer_weightings, shape=[len(layers)] + [1] * self.mean.shape.rank) total_mean = math_ops.reduce_sum(layer_means * layer_weightings, axis=0) inter_layer_variances = (layer_means - total_mean)**2 total_variance = math_ops.reduce_sum( ((layer_variances + inter_layer_variances) * layer_weightings), axis=0) self.mean.assign(total_mean) self.variance.assign(total_variance) self.count.assign(total_count) def reset_state(self): # pylint: disable=method-hidden if self.built: self.mean.assign(array_ops.zeros_like(self.mean)) self.variance.assign(array_ops.ones_like(self.variance)) self.count.assign(array_ops.zeros_like(self.count)) def call(self, inputs): inputs = self._standardize_inputs(inputs) # We need to reshape the mean and variance data to ensure that Tensorflow # broadcasts the data correctly. 
mean = array_ops.reshape(self.mean, self._broadcast_shape) variance = array_ops.reshape(self.variance, self._broadcast_shape) return ((inputs - mean) / math_ops.maximum(math_ops.sqrt(variance), K.epsilon())) def compute_output_shape(self, input_shape): return input_shape def compute_output_signature(self, input_spec): return input_spec def get_config(self): config = super(Normalization, self).get_config() config.update({'axis': self.axis}) return config def set_weights(self, weights): """Override for set_weights to ensure we can set just mean/var weights.""" if len(weights) == 2: weights.append(np.array(0)) super(Normalization, self).set_weights(weights) def _standardize_inputs(self, inputs): inputs = ops.convert_to_tensor_v2_with_dispatch(inputs) if inputs.shape.rank == 0: inputs = array_ops.reshape(inputs, [1, 1]) elif inputs.shape.rank == 1: inputs = array_ops.expand_dims(inputs, 1) if inputs.dtype != self.dtype: inputs = math_ops.cast(inputs, self.dtype) return inputs def convert_to_ndarray(values): if isinstance(values, np.ndarray): return values elif isinstance(values, ops.Tensor): return K.get_value(values) else: return np.array(values)
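# A hedged sanity-check sketch (assumes eager execution, the TF 2.x default):
# with axis=-1 the features axis is kept and the batch axis is reduced, so
# after adapt() the layer output should match (x - mean) / sqrt(var) computed
# with NumPy over axis 0, up to float32 precision.
if __name__ == '__main__':
  data = np.array([[0., 100.], [2., 300.], [4., 500.]], dtype=np.float32)
  layer = Normalization(axis=-1)
  layer.adapt(data)

  expected = (data - data.mean(axis=0)) / np.sqrt(data.var(axis=0))
  np.testing.assert_allclose(layer(data).numpy(), expected, rtol=1e-4, atol=1e-4)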
from zope.interface import implementer from node.ext.uml.interfaces import ( ModelIllFormedException, IPackage, IUMLElement, IAction, IActivity, IActivityEdge, IActivityFinalNode, IActivityNode, IBehavior, IConstraint, IControlNode, IDecisionNode, IFinalNode, IFlowFinalNode, IForkNode, IInitialNode, IJoinNode, IMergeNode, IOpaqueAction, IPreConstraint, IPostConstraint, ) from node.ext.uml.core import UMLElement @implementer(IActivityNode) class ActivityNode(UMLElement): def check_model_constraints(self): super(ActivityNode, self).check_model_constraints() try: assert self.__parent__ is not None assert IActivity.providedBy(self.__parent__) except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An ActivityNode must have an Activity as parent" @property def activity(self): return self.__parent__ # cache this @property def incoming_edges(self): for obj in self.activity.filtereditervalues(IActivityEdge): if obj.target.uuid == self.uuid: yield obj # cache this @property def outgoing_edges(self): for obj in self.activity.filtereditervalues(IActivityEdge): if obj.source.uuid == self.uuid: yield obj @implementer(IAction) class Action(ActivityNode): # TODO: leave or remove? @property def context(self): return self.activity.context @property def preconditions(self): return self.filtereditervalues(IPreConstraint) @property def postconditions(self): return self.filtereditervalues(IPostConstraint) @implementer(IBehavior) class Behavior(UMLElement): def __init__(self, name=None, context=None): self.context = context super(Behavior, self).__init__(name) @property def preconditions(self): return self.filtereditervalues(IPreConstraint) @property def postconditions(self): return self.filtereditervalues(IPostConstraint) @implementer(IControlNode) class ControlNode(ActivityNode): pass @implementer(IFinalNode) class FinalNode(ControlNode): def check_model_constraints(self): super(FinalNode, self).check_model_constraints() try: assert list(super(FinalNode, self).outgoing_edges) == [] except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ u"FinalNode cannot have outgoing edges" ############################################################################### # CONCRETE CLASSES ############################################################################### @implementer(IActivity) class Activity(Behavior): abstract = False def check_model_constraints(self): super(Activity, self).check_model_constraints() try: assert(IPackage.providedBy(self.package)) except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An activity must have exactly one package as parent." 
@property def package(self): return self.__parent__ @property def nodes(self): return self.filtereditervalues(IActivityNode) @property def edges(self): return self.filtereditervalues(IActivityEdge) # Convinience method, not defined by UML 2.2 specification @property def actions(self): return self.filtereditervalues(IAction) @implementer(IOpaqueAction) class OpaqueAction(Action): abstract = False @implementer(IActivityEdge) class ActivityEdge(UMLElement): abstract = False def check_model_constraints(self): super(ActivityEdge, self).check_model_constraints() try: assert self.source or self.target is not None except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An ActivityEdge must have source and target set" # [1] try: assert self.source.activity is self.target.activity except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "Source and target must be in the same activity" # [2] try: assert self.__parent__ is not None assert IActivity.providedBy(self.__parent__) except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An ActivityEdge must have an Activity as parent" try: assert IActivityNode.providedBy(self.source) except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An ActivityEdge must have an ActivityNode as source" try: assert IActivityNode.providedBy(self.target) except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "An ActivityEdge must have an ActivityNode as target" source_uuid = None target_uuid = None def __init__(self, name=None, source=None, target=None, guard=None): # TODO: bool(source) evals to False if IControlNode.providedBy(source) if IActivityNode.providedBy(source): self.source = source if IActivityNode.providedBy(target): self.target = target self.guard = guard super(ActivityEdge, self).__init__(name) @property def activity(self): return self.__parent__ def get_source(self): return self.node(self.source_uuid) def set_source(self, source): # TODO: invalidate cache key for target's outgoingEdges method self.source_uuid = source.uuid source = property(get_source, set_source) def get_target(self): return self.node(self.target_uuid) def set_target(self, target): # TODO: invalidate cache key for target's incomingEdges method self.target_uuid = target.uuid target = property(get_target, set_target) ############################################################################## # Initial and final ############################################################################## @implementer(IInitialNode) class InitialNode(ControlNode): abstract = False def check_model_constraints(self): super(InitialNode, self).check_model_constraints() # [1] try: assert list(super(InitialNode, self).incoming_edges) == [] except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ u"InitialNode cannot have incoming edges" @implementer(IActivityFinalNode) class ActivityFinalNode(FinalNode): abstract = False @implementer(IFlowFinalNode) class FlowFinalNode(FinalNode): abstract = False ############################################################################### # More control nodes ############################################################################### @implementer(IDecisionNode) class DecisionNode(ControlNode): abstract = False def check_model_constraints(self): super(DecisionNode, self).check_model_constraints() # [1] try: assert len(list(self.incoming_edges)) is 1 assert len(list(self.outgoing_edges)) >= 1 except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ 
"A DecisionNode has one incoming edge and at least"\ "one outgoing edge." @implementer(IForkNode) class ForkNode(ControlNode): abstract = False def check_model_constraints(self): super(ForkNode, self).check_model_constraints() # [1] try: assert len(list(self.incoming_edges)) is 1 assert len(list(self.outgoing_edges)) >= 1 except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ "A ForkNode has one incoming edge and at least "\ "one outgoing edge." @implementer(IJoinNode) class JoinNode(ControlNode): abstract = False def check_model_constraints(self): super(JoinNode, self).check_model_constraints() # [1] try: assert len(list(self.incoming_edges)) >= 1 assert len(list(self.outgoing_edges)) is 1 except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ u"A join node has one outgoing edge and at least "\ u"one incoming edge." # TODO: UML2's MergeNode behavior does not reduce concurrency # here the concurrency is reduced if 2 tokens come into the node # at a time. THIS SHOULD BE CHANGED... @implementer(IMergeNode) class MergeNode(ControlNode): abstract = False def check_model_constraints(self): super(MergeNode, self).check_model_constraints() # [1] try: assert len(list(self.incoming_edges)) >= 1 assert len(list(self.outgoing_edges)) is 1 except AssertionError: raise ModelIllFormedException,\ str(self) + " " +\ u"A merge node has one outgoing edge and at least"\ u"one incoming edge." ############################################################################### # Constraints ############################################################################### @implementer(IConstraint) class Constraint(UMLElement): abstract = False def __init__(self, name=None, specification=None): self.specification = specification super(Constraint, self).__init__(name) @property def constrained_element(self): return self.__parent__ @implementer(IPreConstraint) class PreConstraint(Constraint): abstract = False @implementer(IPostConstraint) class PostConstraint(Constraint): abstract = False def validate(node): """Recursive model validation """ if IUMLElement.providedBy(node): node.check_model_constraints() for sub in node.filtereditervalues(IUMLElement): validate(sub) def get_element_by_xmiid(node, xmiid): if node.xmiid == xmiid: return node # TODO: may not get all elements if an INode but not IUMLElement providing # element sits within the hierachy for el in node.filtereditervalues(IUMLElement): ele = get_element_by_xmiid(el, xmiid) if ele is not None: return ele return None
import os import logging import datetime import webapp2 from google.appengine.api import memcache from google.appengine.ext import ndb from google.appengine.ext.webapp import template import tba_config from base_controller import CacheableHandler from consts.event_type import EventType from consts.notification_type import NotificationType from helpers.event_helper import EventHelper from models.event import Event from models.insight import Insight from models.team import Team from models.sitevar import Sitevar def render_static(page): memcache_key = "main_%s" % page html = memcache.get(memcache_key) if html is None: path = os.path.join(os.path.dirname(__file__), "../templates/%s.html" % page) html = template.render(path, {}) if tba_config.CONFIG["memcache"]: memcache.set(memcache_key, html, 86400) return html def handle_404(request, response, exception): response.write(render_static("404")) response.set_status(404) def handle_500(request, response, exception): logging.exception(exception) response.write(render_static("500")) response.set_status(500) class MainKickoffHandler(CacheableHandler): CACHE_VERSION = 3 CACHE_KEY_FORMAT = "main_kickoff" def __init__(self, *args, **kw): super(MainKickoffHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 def _render(self, *args, **kw): kickoff_datetime_est = datetime.datetime(2016, 1, 9, 10, 30) kickoff_datetime_utc = kickoff_datetime_est + datetime.timedelta(hours=5) is_kickoff = datetime.datetime.now() >= kickoff_datetime_est - datetime.timedelta(days=1) # turn on 1 day before self.template_values.update({ 'is_kickoff': is_kickoff, 'kickoff_datetime_est': kickoff_datetime_est, 'kickoff_datetime_utc': kickoff_datetime_utc, }) path = os.path.join(os.path.dirname(__file__), "../templates/index_kickoff.html") return template.render(path, self.template_values) class MainBuildseasonHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_buildseason" def __init__(self, *args, **kw): super(MainBuildseasonHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): endbuild_datetime_est = datetime.datetime(2016, 2, 23, 23, 59) endbuild_datetime_utc = endbuild_datetime_est + datetime.timedelta(hours=5) week_events = EventHelper.getWeekEvents() self.template_values.update({ 'endbuild_datetime_est': endbuild_datetime_est, 'endbuild_datetime_utc': endbuild_datetime_utc, 'events': week_events, }) path = os.path.join(os.path.dirname(__file__), "../templates/index_buildseason.html") return template.render(path, self.template_values) class MainChampsHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_champs" def __init__(self, *args, **kw): super(MainChampsHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 def _render(self, *args, **kw): year = datetime.datetime.now().year event_keys = Event.query(Event.year == year, Event.event_type_enum.IN(EventType.CMP_EVENT_TYPES)).fetch(100, keys_only=True) events = [event_key.get() for event_key in event_keys] self.template_values.update({ "events": events, "year": year, }) insights = ndb.get_multi([ndb.Key(Insight, Insight.renderKeyName(year, insight_name)) for insight_name in Insight.INSIGHT_NAMES.values()]) for insight in insights: if insight: self.template_values[insight.name] = insight path = os.path.join(os.path.dirname(__file__), '../templates/index_champs.html') return template.render(path, self.template_values) class MainCompetitionseasonHandler(CacheableHandler): CACHE_VERSION = 5 
CACHE_KEY_FORMAT = "main_competitionseason" def __init__(self, *args, **kw): super(MainCompetitionseasonHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 def _render(self, *args, **kw): week_events = EventHelper.getWeekEvents() self.template_values.update({ "events": week_events, }) path = os.path.join(os.path.dirname(__file__), '../templates/index_competitionseason.html') return template.render(path, self.template_values) class MainInsightsHandler(CacheableHandler): CACHE_VERSION = 2 CACHE_KEY_FORMAT = "main_insights" def __init__(self, *args, **kw): super(MainInsightsHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 def _render(self, *args, **kw): week_events = EventHelper.getWeekEvents() year = datetime.datetime.now().year self.template_values.update({ "events": week_events, "year": year, }) insights = ndb.get_multi([ndb.Key(Insight, Insight.renderKeyName(year, insight_name)) for insight_name in Insight.INSIGHT_NAMES.values()]) for insight in insights: if insight: self.template_values[insight.name] = insight path = os.path.join(os.path.dirname(__file__), '../templates/index_insights.html') return template.render(path, self.template_values) class MainOffseasonHandler(CacheableHandler): CACHE_VERSION = 2 CACHE_KEY_FORMAT = "main_offseason" def __init__(self, *args, **kw): super(MainOffseasonHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 def _render(self, *args, **kw): week_events = EventHelper.getWeekEvents() self.template_values.update({ "events": week_events, }) path = os.path.join(os.path.dirname(__file__), '../templates/index_offseason.html') return template.render(path, self.template_values) class ContactHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_contact" def __init__(self, *args, **kw): super(ContactHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/contact.html") return template.render(path, self.template_values) class HashtagsHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_hashtags" def __init__(self, *args, **kw): super(HashtagsHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/hashtags.html") return template.render(path, self.template_values) class AboutHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_about" def __init__(self, *args, **kw): super(AboutHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/about.html") return template.render(path, self.template_values) class ThanksHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_thanks" def __init__(self, *args, **kw): super(ThanksHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/thanks.html") return template.render(path, self.template_values) class OprHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_opr" def __init__(self, *args, **kw): super(OprHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/opr.html") return template.render(path, self.template_values) 
class SearchHandler(webapp2.RequestHandler): def get(self): try: q = self.request.get("q") logging.info("search query: %s" % q) if q.isdigit(): team_id = "frc%s" % q team = Team.get_by_id(team_id) if team: self.redirect(team.details_url) return None elif len(q) in {3, 4, 5}: # event shorts are between 3 and 5 characters long year = datetime.datetime.now().year # default to current year event_id = "%s%s" % (year, q) event = Event.get_by_id(event_id) if event: self.redirect(event.details_url) return None except Exception, e: logging.warning("warning: %s" % e) finally: self.response.out.write(render_static("search")) class GamedayHandler(CacheableHandler): CACHE_VERSION = 2 CACHE_KEY_FORMAT = "main_gameday" def __init__(self, *args, **kw): super(GamedayHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 def _render(self, *args, **kw): special_webcasts_future = Sitevar.get_by_id_async('gameday.special_webcasts') special_webcasts_temp = special_webcasts_future.get_result() if special_webcasts_temp: special_webcasts_temp = special_webcasts_temp.contents else: special_webcasts_temp = {} special_webcasts = [] for webcast in special_webcasts_temp.values(): toAppend = {} for key, value in webcast.items(): toAppend[str(key)] = str(value) special_webcasts.append(toAppend) ongoing_events = [] ongoing_events_w_webcasts = [] week_events = EventHelper.getWeekEvents() for event in week_events: if event.now: ongoing_events.append(event) if event.webcast: valid = [] for webcast in event.webcast: if 'type' in webcast and 'channel' in webcast: event_webcast = {'event': event} valid.append(event_webcast) # Add webcast numbers if more than one for an event if len(valid) > 1: count = 1 for event in valid: event['count'] = count count += 1 ongoing_events_w_webcasts += valid self.template_values.update({ 'special_webcasts': special_webcasts, 'ongoing_events': ongoing_events, 'ongoing_events_w_webcasts': ongoing_events_w_webcasts }) path = os.path.join(os.path.dirname(__file__), '../templates/gameday.html') return template.render(path, self.template_values) class WebcastsHandler(CacheableHandler): CACHE_VERSION = 2 CACHE_KEY_FORMAT = "main_webcasts" def __init__(self, *args, **kw): super(WebcastsHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): year = datetime.datetime.now().year event_keys = Event.query(Event.year == year).order(Event.start_date).fetch(500, keys_only=True) events = ndb.get_multi(event_keys) self.template_values.update({ 'events': events, 'year': year, }) path = os.path.join(os.path.dirname(__file__), '../templates/webcasts.html') return template.render(path, self.template_values) class RecordHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "main_record" def __init__(self, *args, **kw): super(RecordHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/record.html") return template.render(path, self.template_values) class ApiDocumentationHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "api_docs" def __init__(self, *args, **kw): super(ApiDocumentationHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/apidocs.html") return template.render(path, self.template_values) class ApiWriteHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "api_write" def 
__init__(self, *args, **kw): super(ApiWriteHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/apiwrite.html") return template.render(path, self.template_values) class MatchInputHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "match_input" def __init__(self, *args, **kw): super(MatchInputHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 def _render(self, *args, **kw): path = os.path.join(os.path.dirname(__file__), "../templates/matchinput.html") return template.render(path, self.template_values) class WebhookDocumentationHandler(CacheableHandler): CACHE_VERSION = 1 CACHE_KEY_FORMAT = "webhook_docs" def __init__(self, *args, **kw): super(WebhookDocumentationHandler, self).__init__(*args, **kw) self._cache_expiration = 60 * 60 * 24 * 7 def _render(self, *args, **kw): self.template_values['enabled'] = NotificationType.enabled_notifications self.template_values['types'] = NotificationType.types path = os.path.join(os.path.dirname(__file__), "../templates/webhookdocs.html") return template.render(path, self.template_values)
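# A hedged sketch of how handlers like these are typically wired up with
# webapp2. The URL map below is illustrative only (not the project's actual
# route table), and the real WSGI application is normally built elsewhere;
# the name _example_app is hypothetical.
_example_app = webapp2.WSGIApplication([
    ('/', MainCompetitionseasonHandler),
    ('/about', AboutHandler),
    ('/contact', ContactHandler),
    ('/gameday', GamedayHandler),
    ('/search', SearchHandler),
], debug=False)
# Hook up the module-level error pages defined at the top of this file.
_example_app.error_handlers[404] = handle_404
_example_app.error_handlers[500] = handle_500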
# Copyright 2015 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """SavedModel builder implementation.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from google.protobuf.any_pb2 import Any from tensorflow.core.framework import types_pb2 from tensorflow.core.protobuf import meta_graph_pb2 from tensorflow.core.protobuf import saved_model_pb2 from tensorflow.core.protobuf import saver_pb2 from tensorflow.python.framework import dtypes from tensorflow.python.framework import ops from tensorflow.python.lib.io import file_io from tensorflow.python.ops import variables from tensorflow.python.platform import tf_logging from tensorflow.python.saved_model import constants from tensorflow.python.training import saver as tf_saver from tensorflow.python.util import compat class SavedModelBuilder(object): """Builds the `SavedModel` protocol buffer and saves variables and assets. The `SavedModelBuilder` class provides functionality to build a `SavedModel` protocol buffer. Specifically, this allows multiple meta graphs to be saved as part of a single language-neutral `SavedModel`, while sharing variables and assets. To build a SavedModel, the first meta graph must be saved with variables. Subsequent meta graphs will simply be saved with their graph definitions. If assets need to be saved and written or copied to disk, they can be provided when the meta graph def is added. If multiple meta graph defs are associated an asset of the same name, only the first version is retained. Each meta graph added to the SavedModel must be annotated with tags. The tags provide a means to identify the specific meta graph to load and restore, along with the shared set of variables and assets. Typical usage for the `SavedModelBuilder`: ```python ... builder = tf.saved_model.builder.SavedModelBuilder(export_dir) with tf.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph_and_variables(sess, ["foo-tag"], signature_def_map=foo_signatures, assets_collection=foo_assets) ... with tf.Session(graph=tf.Graph()) as sess: ... builder.add_meta_graph(["bar-tag", "baz-tag"]) ... builder.save() ``` """ def __init__(self, export_dir): self._saved_model = saved_model_pb2.SavedModel() self._saved_model.saved_model_schema_version = ( constants.SAVED_MODEL_SCHEMA_VERSION) self._export_dir = export_dir if file_io.file_exists(export_dir): raise AssertionError( "Export directory already exists. Please specify a different export " "directory: %s" % export_dir) file_io.recursive_create_dir(self._export_dir) # Boolean to track whether variables and assets corresponding to the # SavedModel have been saved. Specifically, the first meta graph to be added # MUST use the add_meta_graph_and_variables() API. Subsequent add operations # on the SavedModel MUST use the add_meta_graph() API which does not save # weights. 
self._has_saved_variables = False def _save_and_write_assets(self, assets_collection_to_add=None): """Saves asset to the meta graph and writes asset files to disk. Args: assets_collection_to_add: The collection where the asset paths are setup. """ asset_source_filepath_list = _maybe_save_assets(assets_collection_to_add) # Return if there are no assets to write. if len(asset_source_filepath_list) is 0: tf_logging.info("No assets to write.") return assets_destination_dir = os.path.join( compat.as_bytes(self._export_dir), compat.as_bytes(constants.ASSETS_DIRECTORY)) if not file_io.file_exists(assets_destination_dir): file_io.recursive_create_dir(assets_destination_dir) # Copy each asset from source path to destination path. for asset_source_filepath in asset_source_filepath_list: asset_source_filename = os.path.basename(asset_source_filepath) asset_destination_filepath = os.path.join( compat.as_bytes(assets_destination_dir), compat.as_bytes(asset_source_filename)) # Only copy the asset file to the destination if it does not already # exist. This is to ensure that an asset with the same name defined as # part of multiple graphs is only copied the first time. if not file_io.file_exists(asset_destination_filepath): file_io.copy(asset_source_filepath, asset_destination_filepath) tf_logging.info("Assets written to: %s", assets_destination_dir) def _maybe_add_legacy_init_op(self, legacy_init_op=None): """Add legacy init op to the SavedModel. Args: legacy_init_op: Optional legacy init op to support backward compatibility. Raises: TypeError if legacy init op is not of type `Operation`. """ if legacy_init_op is not None: if not isinstance(legacy_init_op, ops.Operation): raise TypeError("legacy_init_op needs to be an Operation: %r" % legacy_init_op) ops.add_to_collection(constants.LEGACY_INIT_OP_KEY, legacy_init_op) def _add_main_op(self, main_op): """Add main op to the SavedModel. Args: main_op: Main op to run as part of graph initialization. Raises: TypeError if main op is not of type `Operation`. """ if main_op is not None: if not isinstance(main_op, ops.Operation): raise TypeError("main_op needs to be an Operation: %r" % main_op) ops.add_to_collection(constants.MAIN_OP_KEY, main_op) def _tag_and_add_meta_graph(self, meta_graph_def, tags, signature_def_map): """Tags the meta graph def and adds it to the SavedModel. Tags the meta graph def with the supplied tags, adds signature defs to it if provided and appends the meta graph def to the SavedModel proto. Args: meta_graph_def: The meta graph def to add to the SavedModel. tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def. """ for tag in tags: meta_graph_def.meta_info_def.tags.append(tag) if signature_def_map is not None: for key in signature_def_map: meta_graph_def.signature_def[key].CopyFrom(signature_def_map[key]) proto_meta_graph_def = self._saved_model.meta_graphs.add() proto_meta_graph_def.CopyFrom(meta_graph_def) def _validate_tensor_info(self, tensor_info): """Validates the `TensorInfo` proto. Checks if the `name` and `dtype` fields exist and are non-empty. Args: tensor_info: `TensorInfo` protocol buffer to validate. Raises: AssertionError: If the `name` or `dtype` fields of the supplied `TensorInfo` proto are not populated. 
""" if tensor_info is None: raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have the name " "and dtype fields set.") if not tensor_info.name: raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have the name " "field set: %s" % tensor_info) if tensor_info.dtype is types_pb2.DT_INVALID: raise AssertionError( "All TensorInfo protos used in the SignatureDefs must have the dtype " "field set: %s" % tensor_info) def _validate_signature_def_map(self, signature_def_map): """Validates the `SignatureDef` entries in the signature def map. Validation of entries in the signature def map includes ensuring that the `name` and `dtype` fields of the TensorInfo protos of the `inputs` and `outputs` of each `SignatureDef` are populated. Args: signature_def_map: The map of signature defs to be validated. """ if signature_def_map is not None: for signature_def_key in signature_def_map: signature_def = signature_def_map[signature_def_key] inputs = signature_def.inputs outputs = signature_def.outputs for inputs_key in inputs: self._validate_tensor_info(inputs[inputs_key]) for outputs_key in outputs: self._validate_tensor_info(outputs[outputs_key]) def add_meta_graph(self, tags, signature_def_map=None, assets_collection=None, legacy_init_op=None, clear_devices=False, main_op=None): """Adds the current meta graph to the SavedModel. Creates a Saver in the current scope and uses the Saver to export the meta graph def. Invoking this API requires the `add_meta_graph_and_variables()` API to have been invoked before. Args: tags: The set of tags to annotate the meta graph def with. signature_def_map: The map of signature defs to be added to the meta graph def. assets_collection: Assets collection to be saved with SavedModel. Note that this collection should be a subset of the assets saved as part of the first meta graph in the SavedModel. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load. clear_devices: Set to true if the device info on the default graph should be cleared. main_op: Op or group of ops to execute when the graph is loaded. Raises: AssertionError: If the variables for the SavedModel have not been saved yet. """ if not self._has_saved_variables: raise AssertionError( "Graph state including variables and assets has not been saved yet. " "Please invoke `add_meta_graph_and_variables()` first.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. self._validate_signature_def_map(signature_def_map) # Save asset files and write them to disk, if any. self._save_and_write_assets(assets_collection) if main_op is None: # Add legacy init op to the SavedModel. self._maybe_add_legacy_init_op(legacy_init_op) else: self._add_main_op(main_op) # Initialize a saver to generate a sharded output for all saveables in the # current scope. saver = tf_saver.Saver( variables._all_saveable_objects(), # pylint: disable=protected-access sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True) # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). However, a *new* Saver was just created that # includes all of the variables. Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. 
# TODO(soergel): Reinstate clear_extraneous_savers=True when possible. meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) def add_meta_graph_and_variables(self, sess, tags, signature_def_map=None, assets_collection=None, legacy_init_op=None, clear_devices=False, main_op=None): """Adds the current meta graph to the SavedModel and saves variables. Creates a Saver to save the variables from the provided session. Exports the corresponding meta graph def. This function assumes that the variables to be saved have been initialized. For a given `SavedModelBuilder`, this API must be called exactly once and for the first meta graph to save. For subsequent meta graph defs to be added, the `add_meta_graph()` API must be used. Args: sess: The TensorFlow session from which to save the meta graph and variables. tags: The set of tags with which to save the meta graph. signature_def_map: The map of signature def map to add to the meta graph def. assets_collection: Assets collection to be saved with SavedModel. legacy_init_op: Legacy support for op or group of ops to execute after the restore op upon a load. clear_devices: Set to true if the device info on the default graph should be cleared. main_op: Op or group of ops to execute when the graph is loaded. """ if self._has_saved_variables: raise AssertionError("Graph state including variables and assets has " "already been saved. Please invoke " "`add_meta_graph()` instead.") # Validate the signature def map to ensure all included TensorInfos are # properly populated. self._validate_signature_def_map(signature_def_map) # Save asset files and write them to disk, if any. self._save_and_write_assets(assets_collection) # Create the variables sub-directory, if it does not exist. variables_dir = os.path.join( compat.as_text(self._export_dir), compat.as_text(constants.VARIABLES_DIRECTORY)) if not file_io.file_exists(variables_dir): file_io.recursive_create_dir(variables_dir) variables_path = os.path.join( compat.as_text(variables_dir), compat.as_text(constants.VARIABLES_FILENAME)) if main_op is None: # Add legacy init op to the SavedModel. self._maybe_add_legacy_init_op(legacy_init_op) else: self._add_main_op(main_op) # Initialize a saver to generate a sharded output for all saveables in the # current scope. saver = tf_saver.Saver( variables._all_saveable_objects(), # pylint: disable=protected-access sharded=True, write_version=saver_pb2.SaverDef.V2, allow_empty=True) # Save the variables. Also, disable writing the checkpoint state proto. The # file is not used during SavedModel loading. In addition, since a # SavedModel can be copied or moved, this avoids the checkpoint state to # become outdated. saver.save(sess, variables_path, write_meta_graph=False, write_state=False) # Export the meta graph def. # The graph almost certainly previously contained at least one Saver, and # possibly several (e.g. one for loading a pretrained embedding, and another # for the model weights). However, a *new* Saver was just created that # includes all of the variables. Removing the preexisting ones was the # motivation for the clear_extraneous_savers option, but it turns out that # there are edge cases where that option breaks the graph. Until that is # resolved, we just leave the option set to False for now. # TODO(soergel): Reinstate clear_extraneous_savers=True when possible. 
meta_graph_def = saver.export_meta_graph(clear_devices=clear_devices) # Tag the meta graph def and add it to the SavedModel. self._tag_and_add_meta_graph(meta_graph_def, tags, signature_def_map) # Mark this instance of SavedModel as having saved variables, such that # subsequent attempts to save variables will fail. self._has_saved_variables = True def save(self, as_text=False): """Writes a `SavedModel` protocol buffer to disk. The function writes the SavedModel protocol buffer to the export directory in serialized format. Args: as_text: Writes the SavedModel protocol buffer in text format to disk. Returns: The path to which the SavedModel protocol buffer was written. """ if not file_io.file_exists(self._export_dir): file_io.recursive_create_dir(self._export_dir) if as_text: path = os.path.join( compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PBTXT)) file_io.write_string_to_file(path, str(self._saved_model)) else: path = os.path.join( compat.as_bytes(self._export_dir), compat.as_bytes(constants.SAVED_MODEL_FILENAME_PB)) file_io.write_string_to_file(path, self._saved_model.SerializeToString()) tf_logging.info("SavedModel written to: %s", path) return path def _maybe_save_assets(assets_collection_to_add=None): """Saves assets to the meta graph. Args: assets_collection_to_add: The collection where the asset paths are setup. Returns: The list of filepaths to the assets in the assets collection. Raises: ValueError: Indicating an invalid filepath tensor. """ asset_source_filepath_list = [] if assets_collection_to_add is None: tf_logging.info("No assets to save.") return asset_source_filepath_list # Iterate over the supplied asset collection, build the `AssetFile` proto # and add them to the collection with key `constants.ASSETS_KEY`, in the # graph. for asset_tensor in assets_collection_to_add: asset_source_filepath = _asset_path_from_tensor(asset_tensor) if not asset_source_filepath: raise ValueError("Invalid asset filepath tensor %s" % asset_tensor) asset_source_filename = os.path.basename(asset_source_filepath) # Build `AssetFile` proto and add it to the asset collection in the graph. _add_asset_to_collection(asset_source_filename, asset_tensor) asset_source_filepath_list.append(asset_source_filepath) tf_logging.info("Assets added to graph.") return asset_source_filepath_list def _asset_path_from_tensor(path_tensor): """Returns the filepath value stored in constant `path_tensor`. Args: path_tensor: Tensor of a file-path. Returns: The string value i.e. path of the tensor, if valid. Raises: TypeError if tensor does not match expected op type, dtype or value. """ if not isinstance(path_tensor, ops.Tensor): raise TypeError("Asset path tensor must be a Tensor.") if path_tensor.op.type != "Const": raise TypeError("Asset path tensor must be of type constant.") if path_tensor.dtype != dtypes.string: raise TypeError("Asset path tensor must be of dtype string.") str_values = path_tensor.op.get_attr("value").string_val if len(str_values) != 1: raise TypeError("Asset path tensor must be a scalar.") return str_values[0] def _add_asset_to_collection(asset_filename, asset_tensor): """Builds an asset proto and adds it to the asset collection of the graph. Args: asset_filename: The filename of the asset to be added. asset_tensor: The asset tensor used to populate the tensor info of the asset proto. 
""" asset_proto = meta_graph_pb2.AssetFileDef() asset_proto.filename = asset_filename asset_proto.tensor_info.name = asset_tensor.name asset_any_proto = Any() asset_any_proto.Pack(asset_proto) ops.add_to_collection(constants.ASSETS_KEY, asset_any_proto)
# Copyright (c) 2013-2019 Philip Hane # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE # LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR # CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF # SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS # INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN # CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. from . import Net from .asn import IPASN from .nir import NIRWhois import logging log = logging.getLogger(__name__) class IPWhois: """ The wrapper class for performing whois/RDAP lookups and parsing for IPv4 and IPv6 addresses. Args: address (:obj:`str`/:obj:`int`/:obj:`IPv4Address`/:obj:`IPv6Address`): An IPv4 or IPv6 address timeout (:obj:`int`): The default timeout for socket connections in seconds. Defaults to 5. proxy_opener (:obj:`urllib.request.OpenerDirector`): The request for proxy support. Defaults to None. allow_permutations (:obj:`bool`): Allow net.Net() to use additional methods if DNS lookups to Cymru fail. *WARNING* deprecated in favor of new argument asn_methods. Defaults to False. """ def __init__(self, address, timeout=5, proxy_opener=None, allow_permutations=False): self.net = Net( address=address, timeout=timeout, proxy_opener=proxy_opener, allow_permutations=allow_permutations ) self.ipasn = IPASN(self.net) self.address = self.net.address self.timeout = self.net.timeout self.address_str = self.net.address_str self.version = self.net.version self.reversed = self.net.reversed self.dns_zone = self.net.dns_zone def __repr__(self): return 'IPWhois({0}, {1}, {2})'.format( self.address_str, str(self.timeout), repr(self.net.opener) ) def lookup_whois(self, inc_raw=False, retry_count=3, get_referral=False, extra_blacklist=None, ignore_referral_errors=False, field_list=None, asn_alts=None, extra_org_map=None, inc_nir=True, nir_field_list=None, asn_methods=None, get_asn_description=True): """ The function for retrieving and parsing whois information for an IP address via port 43 (WHOIS). Args: inc_raw (:obj:`bool`): Whether to include the raw whois results in the returned dictionary. Defaults to False. retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. get_referral (:obj:`bool`): Whether to retrieve referral whois information, if available. Defaults to False. extra_blacklist (:obj:`list`): Blacklisted whois servers in addition to the global BLACKLIST. Defaults to None. 
ignore_referral_errors (:obj:`bool`): Whether to ignore and continue when an exception is encountered on referral whois lookups. Defaults to False. field_list (:obj:`list`): If provided, a list of fields to parse: ['name', 'handle', 'description', 'country', 'state', 'city', 'address', 'postal_code', 'emails', 'created', 'updated'] If None, defaults to all. asn_alts (:obj:`list`): Additional lookup types to attempt if the ASN dns lookup fails. Allow permutations must be enabled. If None, defaults to all ['whois', 'http']. *WARNING* deprecated in favor of new argument asn_methods. extra_org_map (:obj:`dict`): Dictionary mapping org handles to RIRs. This is for limited cases where ARIN REST (ASN fallback HTTP lookup) does not show an RIR as the org handle e.g., DNIC (which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are (note the case-sensitive - this is meant to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic' Defaults to None. inc_nir (:obj:`bool`): Whether to retrieve NIR (National Internet Registry) information, if registry is JPNIC (Japan) or KRNIC (Korea). If True, extra network requests will be required. If False, the information returned for JP or KR IPs is severely restricted. Defaults to True. nir_field_list (:obj:`list`): If provided and inc_nir, a list of fields to parse: ['name', 'handle', 'country', 'address', 'postal_code', 'nameservers', 'created', 'updated', 'contacts'] If None, defaults to all. asn_methods (:obj:`list`): ASN lookup types to attempt, in order. If None, defaults to all ['dns', 'whois', 'http']. get_asn_description (:obj:`bool`): Whether to run an additional query when pulling ASN information via dns, in order to get the ASN description. Defaults to True. Returns: dict: The IP whois lookup results :: { 'query' (str) - The IP address 'asn' (str) - The Autonomous System Number 'asn_date' (str) - The ASN Allocation date 'asn_registry' (str) - The assigned ASN registry 'asn_cidr' (str) - The assigned ASN CIDR 'asn_country_code' (str) - The assigned ASN country code 'asn_description' (str) - The ASN description 'nets' (list) - Dictionaries containing network information which consists of the fields listed in the ipwhois.whois.RIR_WHOIS dictionary. 'raw' (str) - Raw whois results if the inc_raw parameter is True. 'referral' (dict) - Referral whois information if get_referral is True and the server is not blacklisted. Consists of fields listed in the ipwhois.whois.RWHOIS dictionary. 'raw_referral' (str) - Raw referral whois results if the inc_raw parameter is True. 'nir' (dict) - ipwhois.nir.NIRWhois() results if inc_nir is True. } """ from .whois import Whois # Create the return dictionary. results = {'nir': None} # Retrieve the ASN information. log.debug('ASN lookup for {0}'.format(self.address_str)) asn_data = self.ipasn.lookup( inc_raw=inc_raw, retry_count=retry_count, asn_alts=asn_alts, extra_org_map=extra_org_map, asn_methods=asn_methods, get_asn_description=get_asn_description ) # Add the ASN information to the return dictionary. results.update(asn_data) # Retrieve the whois data and parse. whois = Whois(self.net) log.debug('WHOIS lookup for {0}'.format(self.address_str)) whois_data = whois.lookup( inc_raw=inc_raw, retry_count=retry_count, response=None, get_referral=get_referral, extra_blacklist=extra_blacklist, ignore_referral_errors=ignore_referral_errors, asn_data=asn_data, field_list=field_list ) # Add the WHOIS information to the return dictionary. 
results.update(whois_data) if inc_nir: nir = None if 'JP' == asn_data['asn_country_code']: nir = 'jpnic' elif 'KR' == asn_data['asn_country_code']: nir = 'krnic' if nir: nir_whois = NIRWhois(self.net) nir_data = nir_whois.lookup( nir=nir, inc_raw=inc_raw, retry_count=retry_count, response=None, field_list=nir_field_list, is_offline=False ) # Add the NIR information to the return dictionary. results['nir'] = nir_data return results def lookup_rdap(self, inc_raw=False, retry_count=3, depth=0, excluded_entities=None, bootstrap=False, rate_limit_timeout=120, asn_alts=None, extra_org_map=None, inc_nir=True, nir_field_list=None, asn_methods=None, get_asn_description=True): """ The function for retrieving and parsing whois information for an IP address via HTTP (RDAP). **This is now the recommended method, as RDAP contains much better information to parse.** Args: inc_raw (:obj:`bool`): Whether to include the raw whois results in the returned dictionary. Defaults to False. retry_count (:obj:`int`): The number of times to retry in case socket errors, timeouts, connection resets, etc. are encountered. Defaults to 3. depth (:obj:`int`): How many levels deep to run queries when additional referenced objects are found. Defaults to 0. excluded_entities (:obj:`list`): Entity handles to not perform lookups. Defaults to None. bootstrap (:obj:`bool`): If True, performs lookups via ARIN bootstrap rather than lookups based on ASN data. ASN lookups are not performed and no output for any of the asn* fields is provided. Defaults to False. rate_limit_timeout (:obj:`int`): The number of seconds to wait before retrying when a rate limit notice is returned via rdap+json. Defaults to 120. asn_alts (:obj:`list`): Additional lookup types to attempt if the ASN dns lookup fails. Allow permutations must be enabled. If None, defaults to all ['whois', 'http']. *WARNING* deprecated in favor of new argument asn_methods. extra_org_map (:obj:`dict`): Dictionary mapping org handles to RIRs. This is for limited cases where ARIN REST (ASN fallback HTTP lookup) does not show an RIR as the org handle e.g., DNIC (which is now the built in ORG_MAP) e.g., {'DNIC': 'arin'}. Valid RIR values are (note the case-sensitive - this is meant to match the REST result): 'ARIN', 'RIPE', 'apnic', 'lacnic', 'afrinic' Defaults to None. inc_nir (:obj:`bool`): Whether to retrieve NIR (National Internet Registry) information, if registry is JPNIC (Japan) or KRNIC (Korea). If True, extra network requests will be required. If False, the information returned for JP or KR IPs is severely restricted. Defaults to True. nir_field_list (:obj:`list`): If provided and inc_nir, a list of fields to parse: ['name', 'handle', 'country', 'address', 'postal_code', 'nameservers', 'created', 'updated', 'contacts'] If None, defaults to all. asn_methods (:obj:`list`): ASN lookup types to attempt, in order. If None, defaults to all ['dns', 'whois', 'http']. get_asn_description (:obj:`bool`): Whether to run an additional query when pulling ASN information via dns, in order to get the ASN description. Defaults to True. Returns: dict: The IP RDAP lookup results :: { 'query' (str) - The IP address 'asn' (str) - The Autonomous System Number 'asn_date' (str) - The ASN Allocation date 'asn_registry' (str) - The assigned ASN registry 'asn_cidr' (str) - The assigned ASN CIDR 'asn_country_code' (str) - The assigned ASN country code 'asn_description' (str) - The ASN description 'entities' (list) - Entity handles referred by the top level query. 
'network' (dict) - Network information which consists of the fields listed in the ipwhois.rdap._RDAPNetwork dict. 'objects' (dict) - Mapping of entity handle->entity dict which consists of the fields listed in the ipwhois.rdap._RDAPEntity dict. The raw result is included for each object if the inc_raw parameter is True. 'raw' (dict) - Whois results in json format if the inc_raw parameter is True. 'nir' (dict) - ipwhois.nir.NIRWhois results if inc_nir is True. } """ from .rdap import RDAP # Create the return dictionary. results = {'nir': None} asn_data = None response = None if not bootstrap: # Retrieve the ASN information. log.debug('ASN lookup for {0}'.format(self.address_str)) asn_data = self.ipasn.lookup( inc_raw=inc_raw, retry_count=retry_count, asn_alts=asn_alts, extra_org_map=extra_org_map, asn_methods=asn_methods, get_asn_description=get_asn_description ) # Add the ASN information to the return dictionary. results.update(asn_data) # Retrieve the RDAP data and parse. rdap = RDAP(self.net) log.debug('RDAP lookup for {0}'.format(self.address_str)) rdap_data = rdap.lookup( inc_raw=inc_raw, retry_count=retry_count, asn_data=asn_data, depth=depth, excluded_entities=excluded_entities, response=response, bootstrap=bootstrap, rate_limit_timeout=rate_limit_timeout ) # Add the RDAP information to the return dictionary. results.update(rdap_data) if inc_nir: nir = None if 'JP' == asn_data['asn_country_code']: nir = 'jpnic' elif 'KR' == asn_data['asn_country_code']: nir = 'krnic' if nir: nir_whois = NIRWhois(self.net) nir_data = nir_whois.lookup( nir=nir, inc_raw=inc_raw, retry_count=retry_count, response=None, field_list=nir_field_list, is_offline=False ) # Add the NIR information to the return dictionary. results['nir'] = nir_data return results
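# --------------------------------------------------------------------------
# Hedged usage sketch (not part of the class above). Assuming the class is
# installed as the "ipwhois" package and importable as ipwhois.IPWhois, the
# RDAP path recommended in the docstring is driven roughly like this; the
# sample address and the printed keys are illustrative only.
from pprint import pprint

from ipwhois import IPWhois


def rdap_lookup_sketch(address='74.125.225.229'):
    obj = IPWhois(address, timeout=5)
    # depth=1 also resolves entities referenced by the top-level objects.
    results = obj.lookup_rdap(depth=1, inc_nir=True)
    pprint({k: results.get(k) for k in ('asn', 'asn_cidr', 'asn_registry', 'nir')})
    return results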
from __future__ import unicode_literals, division, absolute_import from builtins import * # pylint: disable=unused-import, redefined-builtin from future.utils import PY2 import logging import re import sys from datetime import datetime from path import Path from flexget import plugin from flexget.config_schema import one_or_more from flexget.event import event from flexget.entry import Entry log = logging.getLogger('filesystem') class Filesystem(object): """ Uses local path content as an input. Can use recursion if configured. Recursion is False by default. Can be configured to true or get integer that will specify max depth in relation to base folder. All files/dir/symlinks are retrieved by default. Can be changed by using the 'retrieve' property. Example 1:: Single path filesystem: /storage/movies/ Example 2:: List of paths filesystem: - /storage/movies/ - /storage/tv/ Example 3:: Object with list of paths filesystem: path: - /storage/movies/ - /storage/tv/ mask: '*.mkv' Example 4:: filesystem: path: - /storage/movies/ - /storage/tv/ recursive: 4 # 4 levels deep from each base folder retrieve: files # Only files will be retrieved Example 5:: filesystem: path: - /storage/movies/ - /storage/tv/ recursive: yes # No limit to depth, all sub dirs will be accessed retrieve: # Only files and dirs will be retrieved - files - dirs """ retrieval_options = ['files', 'dirs', 'symlinks'] paths = one_or_more({'type': 'string', 'format': 'path'}, unique_items=True) schema = { 'oneOf': [ paths, {'type': 'object', 'properties': { 'path': paths, 'mask': {'type': 'string'}, 'regexp': {'type': 'string', 'format': 'regex'}, 'recursive': {'oneOf': [{'type': 'integer', 'minimum': 2}, {'type': 'boolean'}]}, 'retrieve': one_or_more({'type': 'string', 'enum': retrieval_options}, unique_items=True) }, 'required': ['path'], 'additionalProperties': False}] } def prepare_config(self, config): from fnmatch import translate config = config # Converts config to a dict with a list of paths if not isinstance(config, dict): config = {'path': config} if not isinstance(config['path'], list): config['path'] = [config['path']] config.setdefault('recursive', False) # If mask was specified, turn it in to a regexp if config.get('mask'): config['regexp'] = translate(config['mask']) # If no mask or regexp specified, accept all files config.setdefault('regexp', '.') # Sets the default retrieval option to files config.setdefault('retrieve', self.retrieval_options) return config def create_entry(self, filepath, test_mode): """ Creates a single entry using a filepath and a type (file/dir) """ filepath = filepath.abspath() entry = Entry() entry['location'] = filepath if PY2: import urllib, urlparse entry['url'] = urlparse.urljoin('file:', urllib.pathname2url(filepath.encode('utf8'))) else: import pathlib entry['url'] = pathlib.Path(filepath).absolute().as_uri() entry['filename'] = filepath.name if filepath.isfile(): entry['title'] = filepath.namebase else: entry['title'] = filepath.name try: entry['timestamp'] = datetime.fromtimestamp(filepath.getmtime()) except Exception as e: log.warning('Error setting timestamp for %s: %s' % (filepath, e)) entry['timestamp'] = None entry['accessed'] = datetime.fromtimestamp(filepath.getatime()) entry['modified'] = datetime.fromtimestamp(filepath.getmtime()) entry['created'] = datetime.fromtimestamp(filepath.getctime()) if entry.isvalid(): if test_mode: log.info("Test mode. 
Entry includes:") log.info(" Title: %s" % entry["title"]) log.info(" URL: %s" % entry["url"]) log.info(" Filename: %s" % entry["filename"]) log.info(" Location: %s" % entry["location"]) log.info(" Timestamp: %s" % entry["timestamp"]) return entry else: log.error('Non valid entry created: %s ' % entry) return def get_max_depth(self, recursion, base_depth): if recursion is False: return base_depth + 1 elif recursion is True: return float('inf') else: return base_depth + recursion def get_folder_objects(self, folder, recursion): if recursion is False: return folder.listdir() else: return folder.walk(errors='ignore') def get_entries_from_path(self, path_list, match, recursion, test_mode, get_files, get_dirs, get_symlinks): entries = [] for folder in path_list: log.verbose('Scanning folder %s. Recursion is set to %s.' % (folder, recursion)) folder = Path(folder).expanduser() log.debug('Scanning %s' % folder) base_depth = len(folder.splitall()) max_depth = self.get_max_depth(recursion, base_depth) folder_objects = self.get_folder_objects(folder, recursion) for path_object in folder_objects: log.debug('Checking if %s qualifies to be added as an entry.' % path_object) try: path_object.exists() except UnicodeError: log.error('File %s not decodable with filesystem encoding: %s' % (path_object, sys.getfilesystemencoding())) continue entry = None object_depth = len(path_object.splitall()) if object_depth <= max_depth: if match(path_object): if (path_object.isdir() and get_dirs) or ( path_object.islink() and get_symlinks) or ( path_object.isfile() and not path_object.islink() and get_files): entry = self.create_entry(path_object, test_mode) else: log.debug("Path object's %s type doesn't match requested object types." % path_object) if entry and entry not in entries: entries.append(entry) return entries def on_task_input(self, task, config): config = self.prepare_config(config) path_list = config['path'] test_mode = task.options.test match = re.compile(config['regexp'], re.IGNORECASE).match recursive = config['recursive'] get_files = 'files' in config['retrieve'] get_dirs = 'dirs' in config['retrieve'] get_symlinks = 'symlinks' in config['retrieve'] log.verbose('Starting to scan folders.') return self.get_entries_from_path(path_list, match, recursive, test_mode, get_files, get_dirs, get_symlinks) @event('plugin.register') def register_plugin(): plugin.register(Filesystem, 'filesystem', api_ver=2)
#!/usr/bin/python # -*- coding: utf-8 -*- # # Plecost: Wordpress vulnerabilities finder # # @url: http://iniqua.com/labs/ # @url: https://github.com/iniqua/plecost # # @author:Francisco J. Gomez aka ffranz (http://iniqua.com/) # @author:Daniel Garcia aka cr0hn (http://www.cr0hn.com/me/) # # Copyright (c) 2015, Iniqua Team # All rights reserved. # # Redistribution and use in source and binary forms, with or without modification, are permitted provided that the # following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the # following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the # following disclaimer in the documentation and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote # products derived from this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, # SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # """ This file contains function to looking for WordPress plugins and versions """ import csv import aiohttp import asyncio from datetime import datetime from functools import partial from urllib.parse import urlparse from os.path import join from .db import DB from .data import * # noqa from .exceptions import * # noqa from .plugins_utils import plugins_testing from .utils import colorize, generate_error_page, download, get_data_folder from .wordpress_core import is_remote_a_wordpress, get_wordpress_version, get_wordpress_vulnerabilities # ---------------------------------------------------------------------- # Main code of functions # ---------------------------------------------------------------------- def find_versions(args): """ Main function to run libs as version finder. :param args: PlecostOptions object :type args: `PlecostOptions` :return: PlecostResults object. :rtype: `PlecostResults` :raises: PlecostTargetNotAvailable, PlecostNotWordPressFound """ # -------------------------------------------------------------------------- # Common vars # -------------------------------------------------------------------------- url = args.target parsed_url = urlparse(args.target) host = parsed_url.hostname concurrency = args.concurrency log = args.log_function proxy = args.proxy is_color = args.colorize start_time = datetime.now() no_check_wordpress = args.no_check_wordpress no_check_plugins = args.no_check_plugins no_check_wordpress_version = args.no_check_wordpress_version force_scan = args.force_scan # Jackass mode is set? 
if args.jackass is True: concurrency = 9999 # Non-blocking config loop = asyncio.new_event_loop() asyncio.set_event_loop(loop) con = aiohttp.TCPConnector(conn_timeout=10, share_cookies=True, loop=loop, verify_ssl=False) _download = partial(download, max_redirect=0, connector=con, loop=loop) # Get CVE database db = DB(path=join(get_data_folder(), "cve.db")) # -------------------------------------------------------------------------- # Test availability of target # -------------------------------------------------------------------------- log("[*] Testing target connection...") headers, status, content = loop.run_until_complete(_download(url, method="get", get_content=False)) # Detect redirect if status in (300, 301, 302, 303, 307): url = headers.get("location", None) if url is not None: log("\n[%s] Redirection detected to '%s'. Using it now. " % (colorize("ii", "yellow"), url), log_level=1) else: raise PlecostTargetNotAvailable("Redirection detected, but can't determinate the new location") log(colorize(" ok!\n")) # -------------------------------------------------------------------------- # Check if remote host is a WordPress # -------------------------------------------------------------------------- if no_check_wordpress is False: log("[*] Looking for WordPress installation...\n") # Error page content. headers, status, error_page = loop.run_until_complete(_download(generate_error_page(url))) _is_wp = loop.run_until_complete(is_remote_a_wordpress(url, error_page, _download)) if not _is_wp: if force_scan is False: raise PlecostNotWordPressFound("No WordPress installations found in '%s'." % host) else: log(colorize("\n No Wordpress installation found!\n", "yellow")) else: log("\n %s" % colorize(" ok!\n")) # -------------------------------------------------------------------------- # Check WordPress version # -------------------------------------------------------------------------- if no_check_wordpress_version is False: log("[*] Getting WordPress version... ") wordpress_version = loop.run_until_complete(get_wordpress_version(url, _download, db)) # wordpress_version. if wordpress_version: log("%s (latest: %s)" % ( colorize("%s" % wordpress_version.current_version, "red" if wordpress_version.is_outdated is True else "blue"), colorize("%s" % wordpress_version.latest_version) ), 0) # -------------------------------------------------------------------------- # Looking for CVEs for installed Wordpress version # -------------------------------------------------------------------------- if wordpress_version.vulnerabilities: log("\n |_CVE list:\n") for cve in wordpress_version.vulnerabilities: log(" |__%(cve)s: (http://cve.mitre.org/cgi-bin/cvename.cgi?name=%(cve)s)\n" % {"cve": colorize(cve, "red")}) log("\n") else: log(colorize("Unknown!\n", "red")) log("\n") else: wordpress_version = PlecostWordPressInfo(last_version="", current_version="", vulnerabilities=[]) # -------------------------------------------------------------------------- # Check the plugins # -------------------------------------------------------------------------- # Read plugins file and remove \n and \r plugins_info = [] if no_check_plugins is False: plugins = [] plugins_append = plugins.append with open(args.wordlist, "rU") as f: for plugin in f: plugins_append(plugin.replace("\n", "").replace("\r", "")) # Prepare csv file cve_info = csv.reader(plugins) error_page = "" # Find plugins log("[*] Looking for plugins (wordlist: %s) ... 
" % args.wordlist[args.wordlist.rfind("/") + 1:], 0) plugins_info = loop.run_until_complete(plugins_testing(url, error_page, log, cve_info, db, concurrency, loop, con=con)) log("\n[*] Done! \n") # Set finish time end_time = datetime.now() # -------------------------------------------------------------------------- # Clean up # -------------------------------------------------------------------------- con.close() # -------------------------------------------------------------------------- # Make results # -------------------------------------------------------------------------- return PlecostResults(target=args.target, start_time=start_time, end_time=end_time, wordpress_info=wordpress_version, plugins=plugins_info) __all__ = ["find_versions", "_is_remote_a_wordpress"]
# # Copyright (c) 2014 Juniper Networks, Inc. All rights reserved. # """ This file contains implementation of data model for SVC monitor """ from pysandesh.gen_py.sandesh.ttypes import SandeshLevel from cfgm_common.vnc_db import DBBase from cfgm_common import svc_info class DBBaseSM(DBBase): obj_type = __name__ class LoadbalancerPoolSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_pool' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.members = set() self.loadbalancer_healthmonitors = set() self.service_instance = None self.virtual_machine_interface = None self.virtual_ip = None self.update(obj_dict) self.last_sent = None # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj.get('loadbalancer_pool_properties', None) self.provider = obj.get('loadbalancer_pool_provider', None) self.members = set([lm['uuid'] for lm in obj.get('loadbalancer_members', [])]) self.id_perms = obj.get('id_perms', None) self.parent_uuid = obj['parent_uuid'] self.display_name = obj.get('display_name', None) self.update_single_ref('service_instance', obj) self.update_single_ref('virtual_ip', obj) self.update_single_ref('virtual_machine_interface', obj) self.update_multiple_refs('loadbalancer_healthmonitor', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_loadbalancer_pool(obj) obj.update_single_ref('service_instance', {}) obj.update_single_ref('virtual_ip', {}) obj.update_single_ref('virtual_machine_interface', {}) obj.update_multiple_refs('loadbalancer_healthmonitor', {}) del cls._dict[uuid] # end delete def add(self): self.last_sent = \ self._manager.loadbalancer_agent.loadbalancer_pool_add(self) if len(self.members): for member in self.members: member_obj = LoadbalancerMemberSM.get(member) if member_obj: member_obj.last_sent = \ self._manager.loadbalancer_agent.loadbalancer_member_add(member_obj) if self.virtual_ip: vip_obj = VirtualIpSM.get(self.virtual_ip) if vip_obj: vip_obj.last_sent = \ self._manager.loadbalancer_agent.virtual_ip_add(vip_obj) # end add # end class LoadbalancerPoolSM class LoadbalancerMemberSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_member' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.loadbalancer_pool = {} self.update(obj_dict) self.last_sent = None if self.loadbalancer_pool: parent = LoadbalancerPoolSM.get(self.loadbalancer_pool) parent.members.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj.get('loadbalancer_member_properties', None) self.loadbalancer_pool = self.get_parent_uuid(obj) self.id_perms = obj.get('id_perms', None) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_loadbalancer_member(obj) if obj.loadbalancer_pool: parent = LoadbalancerPoolSM.get(obj.loadbalancer_pool) if parent: parent.members.discard(obj.uuid) del cls._dict[uuid] # end delete # end class LoadbalancerMemberSM class VirtualIpSM(DBBaseSM): _dict = {} obj_type = 'virtual_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.loadbalancer_pool = None self.update(obj_dict) self.last_sent = None # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = 
obj.get('virtual_ip_properties', None) self.update_single_ref('virtual_machine_interface', obj) self.update_single_ref('loadbalancer_pool', obj) self.id_perms = obj.get('id_perms', None) self.parent_uuid = obj['parent_uuid'] self.display_name = obj.get('display_name', None) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.delete_virtual_ip(obj) obj.update_single_ref('virtual_machine_interface', {}) obj.update_single_ref('loadbalancer_pool', {}) del cls._dict[uuid] # end delete # end class VirtualIpSM class HealthMonitorSM(DBBaseSM): _dict = {} obj_type = 'loadbalancer_healthmonitor' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.loadbalancer_pools = set() self.last_sent = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.params = obj.get('loadbalancer_healthmonitor_properties', None) self.update_multiple_refs('loadbalancer_pool', obj) self.id_perms = obj.get('id_perms', None) self.parent_uuid = obj['parent_uuid'] self.display_name = obj.get('display_name', None) self.last_sent = self._manager.loadbalancer_agent.update_hm(self) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('loadbalancer_pool', {}) del cls._dict[uuid] # end delete # end class HealthMonitorSM class VirtualMachineSM(DBBaseSM): _dict = {} obj_type = 'virtual_machine' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instance = None self.virtual_router = None self.virtual_machine_interfaces = set() self.virtualization_type = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_single_ref('service_instance', obj) self.update_single_ref('virtual_router', obj) self.update_multiple_refs('virtual_machine_interface', obj) self.display_name = obj.get('display_name', None) if self.display_name is None: return display_list = self.display_name.split('__') if self.service_instance and len(display_list) == 5: self.virtualization_type = display_list[-1] self.proj_fq_name = display_list[0:2] self.index = int(display_list[-2]) - 1 # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('service_instance', {}) obj.update_single_ref('virtual_router', {}) obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end VirtualMachineSM class VirtualRouterSM(DBBaseSM): _dict = {} obj_type = 'virtual_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machines = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine', {}) del cls._dict[uuid] # end delete # end VirtualRouterSM class VirtualMachineInterfaceSM(DBBaseSM): _dict = {} obj_type = 'virtual_machine_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.params = None self.if_type = None self.virtual_ip = None self.virtual_network = None self.virtual_machine = None self.loadbalancer_pool = None 
self.logical_interface = None self.instance_ip = None self.floating_ip = None self.interface_route_table = None self.security_group = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] if obj.get('virtual_machine_interface_properties', None): self.params = obj['virtual_machine_interface_properties'] self.if_type = self.params.get('service_interface_type', None) self.update_single_ref('virtual_ip', obj) self.update_single_ref('loadbalancer_pool', obj) self.update_single_ref('instance_ip', obj) self.update_single_ref('floating_ip', obj) self.update_single_ref('virtual_network', obj) self.update_single_ref('virtual_machine', obj) self.update_single_ref('logical_interface', obj) self.update_single_ref('interface_route_table', obj) self.update_single_ref('security_group', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_single_ref('virtual_ip', {}) obj.update_single_ref('loadbalancer_pool', {}) obj.update_single_ref('instance_ip', {}) obj.update_single_ref('floating_ip', {}) obj.update_single_ref('virtual_network', {}) obj.update_single_ref('virtual_machine', {}) obj.update_single_ref('logical_interface', {}) obj.update_single_ref('interface_route_table', {}) obj.update_single_ref('security_group', {}) del cls._dict[uuid] # end delete # end VirtualMachineInterfaceSM class ServiceInstanceSM(DBBaseSM): _dict = {} obj_type = 'service_instance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_template = None self.loadbalancer_pool = None self.virtual_machines = set() self.params = None self.state = 'init' self.launch_count = 0 self.image = None self.flavor = None self.max_instances = 0 self.availability_zone = None self.ha_mode = None self.vr_id = None self.vn_changed = False self.local_preference = [None, None] self.vn_info = [] self.update(obj_dict) if self.ha_mode == 'active-standby': self.max_instances = 2 self.local_preference = [svc_info.get_active_preference(), svc_info.get_standby_preference()] # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.proj_name = obj['fq_name'][-2] self.check_vn_changes(obj) self.params = obj.get('service_instance_properties', None) self.update_single_ref('service_template', obj) self.update_single_ref('loadbalancer_pool', obj) self.update_multiple_refs('virtual_machine', obj) self.id_perms = obj.get('id_perms', None) if not self.params: return self.vr_id = self.params.get('virtual_router_id', None) self.ha_mode = self.params.get('ha_mode', None) if self.ha_mode != 'active-standby': scale_out = self.params.get('scale_out', None) if scale_out: self.max_instances = scale_out.get('max_instances', 1) # end update def check_vn_changes(self, obj): self.vn_changed = False if not self.params or not obj.get('service_instance_properties'): return old_ifs = self.params.get('interface_list', []) new_ifs = obj['service_instance_properties'].get('interface_list', []) for index in range(0, len(old_ifs)): try: old_if = old_ifs[index] new_if = new_ifs[index] except IndexError: continue if not old_if['virtual_network'] or not new_if['virtual_network']: continue if old_if['virtual_network'] != new_if['virtual_network']: self.vn_changed = True return #end check_vn_changes @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] 
obj.update_single_ref('service_template', {}) obj.update_single_ref('loadbalancer_pool', {}) obj.update_multiple_refs('virtual_machine', {}) del cls._dict[uuid] # end delete # end class ServiceInstanceSM class ServiceTemplateSM(DBBaseSM): _dict = {} obj_type = 'service_template' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instances = set() self.virtualization_type = 'virtual-machine' self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.params = obj.get('service_template_properties') if self.params: self.virtualization_type = self.params.get( 'service_virtualization_type') or 'virtual-machine' self.update_multiple_refs('service_instance', obj) self.id_perms = obj.get('id_perms', None) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('service_instance', {}) del cls._dict[uuid] # end delete # end class ServiceTemplateSM class VirtualNetworkSM(DBBaseSM): _dict = {} obj_type = 'virtual_network' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interfaces = set() obj_dict = self.update(obj_dict) self.add_to_parent(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine_interface', obj) return obj # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) obj.remove_from_parent() del cls._dict[uuid] # end delete # end class VirtualNetworkSM class FloatingIpSM(DBBaseSM): _dict = {} obj_type = 'floating_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.address = None self.virtual_machine_interfaces = set() self.virtual_ip = None self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.address = obj['floating_ip_address'] self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end class FloatingIpSM class InstanceIpSM(DBBaseSM): _dict = {} obj_type = 'instance_ip' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.address = None self.virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.address = obj.get('instance_ip_address', None) self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end class InstanceIpSM class LogicalInterfaceSM(DBBaseSM): _dict = {} obj_type = 'logical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interface = None self.logical_interface_vlan_tag = 0 self.update(obj_dict) if self.physical_interface: parent = PhysicalInterfaceSM.get(self.physical_interface) elif self.physical_router: parent = PhysicalRouterSM.get(self.physical_router) 
if parent: parent.logical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) if obj['parent_type'] == 'physical-router': self.physical_router = self.get_parent_uuid(obj) self.physical_interface = None else: self.physical_interface = self.get_parent_uuid(obj) self.physical_router = None self.update_single_ref('virtual_machine_interface', obj) self.name = obj['fq_name'][-1] self.logical_interface_vlan_tag = obj.get('logical_interface_vlan_tag', 0) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] if obj.physical_interface: parent = PhysicalInterfaceSM.get(obj.physical_interface) elif obj.physical_router: parent = PhysicalInterfaceSM.get(obj.physical_router) if parent: parent.logical_interfaces.discard(obj.uuid) obj.update_single_ref('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end LogicalInterfaceSM class PhysicalInterfaceSM(DBBaseSM): _dict = {} obj_type = 'physical_interface' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) pr = PhysicalRouterSM.get(self.physical_router) if pr: pr.physical_interfaces.add(self.uuid) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.physical_router = self.get_parent_uuid(obj) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] pr = PhysicalRouterSM.get(obj.physical_router) if pr: pr.physical_interfaces.discard(obj.uuid) del cls._dict[uuid] # end delete # end PhysicalInterfaceSM class PhysicalRouterSM(DBBaseSM): _dict = {} obj_type = 'physical_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.management_ip = obj.get('physical_router_management_ip') self.vendor = obj.get('physical_router_vendor_name') self.physical_interfaces = set([pi['uuid'] for pi in obj.get('physical_interfaces', [])]) self.logical_interfaces = set([li['uuid'] for li in obj.get('logical_interfaces', [])]) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end PhysicalRouterSM class ProjectSM(DBBaseSM): _dict = {} obj_type = 'project' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instances = set() self.virtual_networks = set() obj_dict = self.update(obj_dict) self.set_children('virtual_network', obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('service_instance', obj) return obj # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('service_instance', {}) del cls._dict[uuid] # end delete # end ProjectSM class DomainSM(DBBaseSM): _dict = {} obj_type = 'domain' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.fq_name = obj['fq_name'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end DomainSM class SecurityGroupSM(DBBaseSM): _dict = {} obj_type = 'security_group' def 
__init__(self, uuid, obj_dict=None): self.uuid = uuid self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] del cls._dict[uuid] # end delete # end SecurityGroupSM class InterfaceRouteTableSM(DBBaseSM): _dict = {} obj_type = 'interface_route_table' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.update_multiple_refs('virtual_machine_interface', obj) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end InterfaceRouteTableSM class ServiceApplianceSM(DBBaseSM): _dict = {} obj_type = 'service_appliance' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_appliance_set = None self.kvpairs = [] self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] kvpairs = obj.get('service_appliance_properties', None) if kvpairs: self.kvpairs = kvpairs.get('key_value_pair', []) self.user_credential = obj.get('service_appliance_user_credentials', None) self.ip_address = obj.get('service_appliance_ip_address', None) self.service_appliance_set = self.get_parent_uuid(obj) if self.service_appliance_set: parent = ServiceApplianceSetSM.get(self.service_appliance_set) parent.service_appliances.add(self.uuid) # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] if obj.service_appliance_set: parent = ServiceApplianceSetSM.get(obj.service_appliance_set) if parent: parent.service_appliances.discard(obj.uuid) del cls._dict[uuid] # end delete # end ServiceApplianceSM class ServiceApplianceSetSM(DBBaseSM): _dict = {} obj_type = 'service_appliance_set' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_appliances = set() self.kvpairs = [] self.ha_mode = "standalone" self.update(obj_dict) # end __init__ def add(self): self._manager.loadbalancer_agent.load_driver(self) # end add def update(self, obj=None): if obj is None: obj = self.read_obj(self.uuid) self.name = obj['fq_name'][-1] self.fq_name = obj['fq_name'] self.driver = obj.get('service_appliance_driver', None) kvpairs = obj.get('service_appliance_set_properties', None) if kvpairs: self.kvpairs = kvpairs.get('key_value_pair', []) self.service_appliances = set([sa['uuid'] for sa in obj.get('service_appliances', [])]) if 'service_appliance_ha_mode' in obj: self.ha_mode = obj['service_appliance_ha_mode'] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.loadbalancer_agent.unload_driver(obj) del cls._dict[uuid] # end delete # end ServiceApplianceSetSM class LogicalRouterSM(DBBaseSM): _dict = {} obj_type = 'logical_router' def __init__(self, uuid, obj_dict=None): self.uuid = uuid self.service_instance = None self.virtual_network = None self.virtual_machine_interfaces = set() self.last_virtual_machine_interfaces = set() self.update(obj_dict) # end __init__ def update(self, obj=None): if obj is None: obj = 
self.read_obj(self.uuid) self.parent_uuid = obj['parent_uuid'] self.update_single_ref('service_instance', obj) self.update_multiple_refs('virtual_machine_interface', obj) self.update_single_ref('virtual_network', obj) self.name = obj['fq_name'][-1] # end update @classmethod def delete(cls, uuid): if uuid not in cls._dict: return obj = cls._dict[uuid] cls._manager.snat_agent.delete_snat_instance(obj) obj.update_single_ref('service_instance', {}) obj.update_single_ref('virtual_network', {}) obj.update_multiple_refs('virtual_machine_interface', {}) del cls._dict[uuid] # end delete # end LogicalRouterSM
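# --------------------------------------------------------------------------
# Hedged illustration (not part of the module above). Every *SM class above
# follows the same DBBase pattern: a per-class ``_dict`` registry keyed by
# uuid, update() refreshing fields from an object dict, get() looking an
# instance up, and delete() unlinking references and dropping the entry.
# The real base class (cfgm_common.vnc_db.DBBase) also needs an API/DB
# manager and registers instances via locate(); the toy class below only
# mirrors the registry life-cycle, with registration done inline to keep
# the sketch self-contained.
class ToySM(object):
    _dict = {}                      # uuid -> instance, one registry per class

    def __init__(self, uuid, obj_dict):
        self.uuid = uuid
        self.update(obj_dict)
        self._dict[uuid] = self     # normally done by DBBase.locate()
    # end __init__

    @classmethod
    def get(cls, uuid):
        return cls._dict.get(uuid)
    # end get

    def update(self, obj):
        self.name = obj['fq_name'][-1]
        self.fq_name = obj['fq_name']
    # end update

    @classmethod
    def delete(cls, uuid):
        # Real classes also clear their single/multiple refs here.
        cls._dict.pop(uuid, None)
    # end delete
# end class ToySM

# pool = ToySM('uuid-1', {'fq_name': ['default-domain', 'demo', 'pool1']})
# assert ToySM.get('uuid-1') is pool
# ToySM.delete('uuid-1')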
# =============================================================================== # Copyright 2014 Jake Ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= standard library imports ======================== import os import time # ============= enthought library imports ======================= from traits.api import HasTraits, Str, Float, Enum, List # ============= local library imports ========================== from pychron.core.yaml import yload from pychron.loggable import Loggable from pychron.paths import paths class TestResult(HasTraits): name = Str plugin = Str duration = Float result = Enum('Passed', 'Failed', 'Skipped', 'Invalid') description = Str error = Str class StartupTester(Loggable): results = List def __init__(self, *args, **kw): super(StartupTester, self).__init__(*args, **kw) self._tests = self._load() def test_plugin(self, plugin): pname = plugin.name try: tests = self._get_tests(pname) except KeyError: self.warning('Could not load tests for "{}". Check startup_test.yaml format'.format(pname)) return if not tests: self.debug('No tests for {}'.format(pname)) return for ti in tests: try: func = getattr(plugin, ti) except AttributeError: self.warning('Invalid test "{}" for plugin "{}"'.format(ti, pname)) self.add_test_result(plugin=pname, name=ti, result='Invalid') continue try: description = getattr(plugin, '{}_description'.format(ti)) except AttributeError: description = '' self.info('Testing "{} - {}"'.format(pname, ti)) st = time.time() try: result, error = func() except ValueError as e: self.critical('Invalid function {} {}'.format(plugin.name, ti)) raise e self.info('Test result={}'.format(result)) if isinstance(result, bool): result = 'Passed' if result else 'Failed' elif result is None: result = 'Invalid' # try: # error = getattr(plugin, '{}_error'.format(ti)) # except AttributeError: # error = '' self.add_test_result(name=ti, plugin=pname, description=description, duration=time.time() - st, error=error or '', result=result) def ok_close(self): ok = True specresult = next((ri for ri in self.results if 'spectrometer' in ri.plugin.lower() and ri.name == 'test_communication'), None) if specresult: ok = specresult.result == 'Passed' return ok def add_test_result(self, **kw): """ rdict is a dictionary with the follow key:value_type name: str, duration: float, result: enum('Passed', 'Fail', 'Skipped', 'Invalid') """ if kw: self.results.append(TestResult(**kw)) def _get_tests(self, name): if self._tests: # for ti in self._tests: # print(ti['plugin'].lower() , name.lower()) ts = next((ti['tests'] for ti in self._tests if ti['plugin'].lower() == name.lower()), None) if ts is None: self.debug('------------ Plugin "{}" not in startup_tests.yaml'.format(name)) self.debug(','.join((ti['plugin'] for ti in self._tests))) self.debug('---------------------------------------------------------------') return ts def _load(self): if os.path.isfile(paths.startup_tests): yd = yload(paths.startup_tests) 
return yd else: self.warning('No Startup Test file located at "{}"'.format(paths.startup_tests)) @property def all_passed(self): a = all([ri.result in ('Passed', 'Skipped') for ri in self.results]) return a # ============= EOF ============================================= # def do_test(self): # yl = self._load() # ip = InitializationParser() # # self.results = [] # # # test database connections # for i, ti in enumerate(yl): # if self._verify_test(i, ti): # if self._should_test(ti, ip): # self._do_test(ti, ip) # else: # name = ti.get('name') # self.results.append(TestResult(name=name, result='Skipped')) # else: # name = ti.get('name', 'Test #{:02n}'.format(i+1)) # self.results.append(TestResult(name=name, result='Invalid')) # # def _do_test(self, testdict, ip): # name = testdict['name'] # self.info('doing test {}'.format(name)) # func = getattr(self, '_test_{}'.format(name)) # st = time.time() # # result = TestResult(name=name) # ret = func(ip) # result.trait_set(duration=time.time() - st, result=ret) # self.results.append(result) # # def _test_pychron_database(self, ip): # return 'Passed' # # def _test_massspec_database(self, ip): # return 'Passed' # # def _should_test(self, td, ip): # """ # determine whether this test should be performed. # # for example database tests should be skipped in DatabasePlugin not enabled in the Initialization file # :param td: # :param ip: # :return: True if should perform this test # """ # ret = True # plugin_name = td.get('plugin') # if plugin_name: # plugin = ip.get_plugin(plugin_name) # if plugin is not None: # self.debug('Plugin "{}" not in Initialization file "{}"'.format(plugin_name, ip.path)) # ret = False # # return ret # # def _verify_test(self, i, td): # """ # verify this is a valid test dictionary # # :param i: counter # :param td: test dict # :return: True if valid test # """ # ret = True # for attr in ('name',): # if not attr in td: # self.warning('Failed to verify test #{:02n} no key={}, test={}'.format(i, attr, td)) # ret = False # # else: # if not hasattr(self, '_test_{}'.format(td['name'])): # self.warning('invalid test name. "{}"'.format(td['name'])) # ret = False # # return ret #
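
# --- Illustrative sketch (not part of the original module) -------------------
# StartupTester.test_plugin() above expects each test named in
# startup_tests.yaml to be a plugin method returning a (result, error) tuple:
# a bool result maps to 'Passed'/'Failed', None maps to 'Invalid'. A minimal
# fake plugin is sketched below; the class name DummySpectrometerPlugin and
# its attributes are hypothetical.


class DummySpectrometerPlugin(object):
    name = 'DummySpectrometer'

    # <test>_description attributes are optional; test_plugin falls back to ''.
    test_communication_description = 'Ping the spectrometer controller'

    def test_communication(self):
        # Return (bool, error-or-None) exactly as test_plugin unpacks it.
        ok = True
        return ok, None

# Usage sketch (assumes a startup_tests.yaml entry with
# plugin: DummySpectrometer and tests: [test_communication]):
#
#   tester = StartupTester()
#   tester.test_plugin(DummySpectrometerPlugin())
#   print(tester.all_passed)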
#!/usr/bin/env python ''' This is the "course" curriculum development workflow tool. Run "course help" for complete documentation. ''' import os import sys import bdc import re from contextlib import contextmanager from tempfile import NamedTemporaryFile from termcolor import colored from string import Template as StringTemplate import functools from subprocess import Popen from db_edu_util import (die, error, warn, debug, info, set_debug, databricks, working_directory) from db_edu_util.databricks import DatabricksError from typing import (Generator, Sequence, Pattern, NoReturn, Optional, Any, Dict, TextIO) # ----------------------------------------------------------------------------- # Constants # ----------------------------------------------------------------------------- VERSION = '2.7.1' PROG = os.path.basename(sys.argv[0]) CONFIG_PATH = os.path.expanduser("~/.databricks/course.cfg") USER = os.environ['USER'] # required PAGER_DEFAULT = 'less --RAW-CONTROL-CHARS' EDITOR_DEFAULT = 'open -a textedit' SOURCE_DEFAULT = '_Source' TARGET_DEFAULT = '_Build' AWS_PROFILE_DEFAULT = 'default' DB_PROFILE_DEFAULT = 'DEFAULT' COURSE_REPO_DEFAULT = os.path.expanduser('~/repos/training') DB_CONFIG_PATH_DEFAULT = os.path.expanduser('~/.databrickscfg') OPEN_DIR_DEFAULT = 'open' # Mac-specific, but can be configured. SELF_PACED_PATH_DEFAULT = os.path.join('courses', 'Self-Paced') USAGE = ''' {0}, version {VERSION} USAGE {0} (-h | --help | help | usage) {0} <subcommand> ... DESCRIPTION "course" is a build workflow tool, sitting on top of "bdc". Many subcommands can be chained. For instance: course upload build course work-on Delta build Some commands end the chain, because they consume all remaining arguments. Examples include "sed" and "xargs". Some subcommands require a course name. You can set the default course name via: course work-on coursename Some subcommands honor a command line override via --name. This override does not change the default stored in the configuration. You can also temporarily override the stored course name by setting COURSE_NAME in the environment. You can change the stored course name in three ways: 1) Use "work-on": course work-on Delta 2) Use "set": course set COURSE_NAME=ETL-Part-2 3) Manually edit the configuration file, "{CONFIG_PATH}" Some commands don't work if you're running the tool inside a Docker container. If you use the established Docker aliases, "course" will be able to tell that it's running inside Docker, and it'll refuse to run those commands. See below for the commands that aren't Docker-able. SUBCOMMANDS The various "course" commands are listed below. Those marked with "*" will NOT work in a Docker container. {0} (--help, -h, help) Show the full help page (this output) {0} (--version, -V) Display version and exit {0} toolversions Display course, bdc, gendbc, and master_parse versions and exit. {0} install-tools Deprecated. Use: curl -L https://git.io/fhaLg | bash {0} work-on <name> Specify and remember the course to build, upload, etc. {0} which Print the name of the current course. {0} download Download from SOURCE {0} build Build course from local files and upload built {1} artifacts to Databricks. {0} build-local Build course from local files without {1} uploading artifacts. {0} upload Upload local sources (from your Git repo) to {1} SOURCE. {0} upload-built Upload built artifacts. Assumes you already {1} ran "{0} buildlocal". 
{0} clean Remove TARGET (built artifact) from Databricks {0} clean-source Remove SOURCE (built artifact) from Databricks {0} status Run a "git status" on the local repository {1} clone. {0} diff Run a "git diff" on the local repository, {1} clone and pipe the output through PAGER. {1} PAGER isn't set, the diff output is just {1} dumped to standard output. {0} difftool * Run "git difftool" (with "opendiff") on {1} the repository clone. {0} tag Tag the Git repo with the current course and version. {1} Applies the tag to the top-most commit on the current {1} repo branch. {0} home * Open the folder containing the build.yaml. {0} modules * Open the folder containing the course modules. {0} repo * Open the root of the training repo in git. {0} yaml Edit the build.yaml. {0} config Edit your course script configuration file. {0} showconfig Print the in-memory configuration, which is {1} the parsed configuration file and possible {1} environment overrides. {0} guide * Edit the instructor guide. {0} deploy-images Deploy the course images to S3. NOTE: This {1} command is a stub. It will be implemented in {1} a later release. {0} set VAR=VALUE Configure and save a setting. Note that the {1} keys are not validated. Spelling matters! {0} grep [-i] <re> Search for a regular expression in course {1} notebooks. The grep is done internally (in {1} Python), so any regular expression accepted {1} by Python is suitable. Use "-i" to specify {1} case-blind matching. {0} sed <command> Search/replace text in notebooks using {1} "sed -i -E". Takes a single "sed" argument {1} and requires a version of "sed" that supports {1} the "-i" (inplace edit) option. (The stock {1} ("sed" on the Mac and on Linux both qualify.) The following subcommands consume all remaining arguments and end the chain. {0} xargs <command> Run <command> once per notebook. The following settings are honored. They are first read from the environment. If not there, {0} looks for them in the configuration file, located at "{CONFIG_PATH}". DB_CONFIG_PATH: Path to .databrickscfg Default: {DB_CONFIG_PATH_DEFAULT} DB_PROFILE: Profile to use within .databrickscfg profile Default: {DB_PROFILE_DEFAULT} DB_SHARD_HOME: Workspace path for home folder in Databricks. Required. COURSE_DEBUG: Set to 'true' (in environment) to enable debug messages. Default: false COURSE_NAME: Name of the course you wish to work on. Default: This must be provided, but can default from the stored config. COURSE_REPO: Path to git repo Default: {COURSE_REPO_DEFAULT} COURSE_HOME: Path to course in git repo Default: <COURSE_REPO>/courses/<course-name> COURSE_YAML: Path to the build.yaml Default: <COURSE_HOME>/build.yaml COURSE_MODULES: Path to modules in git repo Default: <COURSE_REPO>/modules/<course-name> COURSE_REMOTE_SOURCE: Workspace path for course source Default: <DB_SHARD_HOME>/<SOURCE>/<course-name> COURSE_REMOTE_TARGET: Workspace path for built course Default: <DB_SHARD_HOME>/<TARGET>/<course-name> COURSE_AWS_PROFILE: AWS authentication profile to use when uploading to S3. Default: {AWS_PROFILE_DEFAULT} SELF_PACED_PATH: A Unix-style path of directories to search for self-paced classes. Each directory is relative to <COURSE_REPO>. "course" searches each of these directories for subdirectories that contain "build.yaml" files, and it assumes each of those subdirectories is a self-paced course. Default: {SELF_PACED_PATH_DEFAULT} SOURCE: Prefix for uploading/downloading source files. Default: {SOURCE_DEFAULT} TARGET: Prefix for uploading/downloading built files. 
Default: {TARGET_DEFAULT} EDITOR: Text editor program Default: {EDITOR_DEFAULT} PAGER: Program to scroll text output Default: {PAGER_DEFAULT} OPEN_DIR: Program to use to open a folder Default: {OPEN_DIR_DEFAULT} '''.format( PROG, ' ' * len(PROG), CONFIG_PATH=CONFIG_PATH, VERSION=VERSION, PAGER_DEFAULT=PAGER_DEFAULT, DB_CONFIG_PATH_DEFAULT=DB_CONFIG_PATH_DEFAULT, DB_PROFILE_DEFAULT=DB_PROFILE_DEFAULT, COURSE_REPO_DEFAULT=COURSE_REPO_DEFAULT, AWS_PROFILE_DEFAULT=AWS_PROFILE_DEFAULT, SOURCE_DEFAULT=SOURCE_DEFAULT, TARGET_DEFAULT=TARGET_DEFAULT, EDITOR_DEFAULT=EDITOR_DEFAULT, OPEN_DIR_DEFAULT=OPEN_DIR_DEFAULT, SELF_PACED_PATH_DEFAULT=SELF_PACED_PATH_DEFAULT ) WARNING_PREFIX = 'WARNING: ' ERROR_PREFIX = 'ERROR: ' DEBUG_PREFIX = '(DEBUG) ' COLUMNS = int(os.environ.get('COLUMNS', '80')) - 1 # ----------------------------------------------------------------------------- # Classes # ----------------------------------------------------------------------------- class CourseError(Exception): pass # ----------------------------------------------------------------------------- # Internal functions # ----------------------------------------------------------------------------- @contextmanager def noop(result: Any, *args: Any, **kw: Any) -> Any: """ A no-op context manager, with is occasionally useful. Yields its first positional parameter. Ignores all the others. :param result: what to yield :param args: remaining positional parameters (ignored) :param kw: keyword parameters. Ignored. """ yield result @contextmanager def pager(cfg: Dict[str, str]) -> Generator[None, TextIO, None]: """ Provides a convenient way to write output to a pager. This context manager yields a file descriptor you can use to write to the pager. If the pager isn't defined, the file descriptor will just be stdout. This function manages cleanup and ensures that the pager has proper access to the terminal. :param cfg: the loaded configuration :returns: the file descriptior (as a yield) """ # Dump to a temporary file if the pager is defined. This allows the pager # to use stdin to read from the terminal. the_pager = cfg.get('PAGER') if the_pager: opener = NamedTemporaryFile else: # If this confuses you, go here: # https://docs.python.org/2/library/functools.html#functools.partial opener = functools.partial(noop, sys.stdout) with opener(mode='w') as out: yield out out.flush() if the_pager: # In this case, we know we have a NamedTemporaryFile. We can # send the temporary file to the pager. p = Popen(f'{the_pager} <{out.name}', shell=True) p.wait() def check_for_docker(command: str) -> NoReturn: # Note: This path is created by the shell script (../docker/create-image.sh) # specifically so we can test for it. if os.path.exists("/etc/in-docker"): raise CourseError( f'"{PROG} {command}" does not work inside a Docker container.' ) def cmd(shell_command: str, quiet: bool = False, dryrun: bool = False) -> NoReturn: """ Run a shell command. :param shell_command: the string containing the command and args :param quiet: True: don't echo the command before running it. :param dryrun: echo the command, but don't run it :raises CourseError: If the command exits with a non-zero status """ if dryrun or (not quiet): print(f"+ {shell_command}") if not dryrun: rc = os.system(shell_command) if rc != 0: raise CourseError(f'Command exited with {rc}') def quote_shell_arg(arg: str) -> str: """ Ensure that an argument to be passed to a shell command is quoted. 
:param arg: :return: possibly changed argument """ quoted = '' q = arg[0] if q in ('"', "'"): # Already quoted, hopefully. if arg[-1] != q: raise CourseError(f'Mismatched quotes in shell argument: {arg}') quoted = arg elif ('"' in arg) and ("'" in arg): raise CourseError( 'Shell argument cannot be quoted, since it contains single AND ' f'double quotes: {arg}' ) elif "'" in arg: quoted = '"' + arg + '"' else: quoted = "'" + arg + "'" return quoted def load_config(config_path: str, apply_defaults: bool = True, show_warnings: bool = False) -> Dict[str, str]: """ Load the configuration file. :param config_path: path to the configuration file :param apply_defaults: If True (default), apply all known default values. If False, just return what's in the config file. :param show_warnings: Warn about some things. Generally only desirable at program startup. :return: A dictionary of configuration items """ bad = False comment = re.compile("^\s*#.*$") cfg = {} parent_dir = os.path.dirname(config_path) if os.path.isfile(parent_dir): raise CourseError( f'''"{parent_dir}" already exists, but it isn't a directory.''' ) if not os.path.exists(parent_dir): os.makedirs(parent_dir) if os.path.exists(config_path): with open(config_path) as f: for (i, line) in enumerate([l.rstrip() for l in f.readlines()]): lno = i + 1 if len(line.strip()) == 0: continue if comment.search(line): continue fields = line.split('=') if len(fields) != 2: bad = True error(f'"{config_path}", line {lno}: Malformed line') continue cfg[fields[0]] = fields[1] if bad: raise CourseError("Configuration error(s).") setting_keys_and_defaults = ( # The second item in each tuple is a default value. The third item # indicates whether it can be overridden in the configuration or # the environment. # # The default is treated as a Python string template, so it can # substitute values from previous entries in the list. If the default # value is None, that generally means it can be overridden on the # command line (or depends on something else that can be), so it's # checked at runtime. ('DB_CONFIG_PATH', DB_CONFIG_PATH_DEFAULT, True), ('DB_PROFILE', DB_PROFILE_DEFAULT, True), ('DB_SHARD_HOME', None, True), ('PREFIX', None, True), # set later ('COURSE_NAME', None, True), # can be overridden ('COURSE_REPO', COURSE_REPO_DEFAULT, True), ('COURSE_HOME', None, False), # depends on COURSE_NAME ('COURSE_YAML', None, True), ('COURSE_MODULES', None, False), # depends on COURSE_NAME ('COURSE_REMOTE_SOURCE', None, False), # depends on COURSE_NAME ('COURSE_REMOTE_TARGET', None, False), # depends on COURSE_NAME ('COURSE_AWS_PROFILE', AWS_PROFILE_DEFAULT, True), ('SELF_PACED_PATH', SELF_PACED_PATH_DEFAULT, True), ('SOURCE', SOURCE_DEFAULT, True), ('TARGET', TARGET_DEFAULT, True), ('EDITOR', EDITOR_DEFAULT, True), ('PAGER', PAGER_DEFAULT, True), ('OPEN_DIR', OPEN_DIR_DEFAULT, True), ) # Remove anything that cannot be overridden. for e, default, allow_override in setting_keys_and_defaults: if (default is not None): continue v = cfg.get(e) if not v: continue if not allow_override: if show_warnings: warn(f'Ignoring "{e}" in the configuration file, because ' + "it's calculated at run-time.") del cfg[e] if apply_defaults: # Apply environment overrides. Then, check for missing ones where # appropriate, and apply defaults. 
for e, default, _ in setting_keys_and_defaults: v = os.environ.get(e) if v is not None: cfg[e] = v if (cfg.get(e) is None) and default: t = StringTemplate(default) cfg[e] = t.substitute(cfg) return cfg def get_self_paced_courses(cfg: Dict[str, str]) -> Sequence[str]: """ Find the names of all self-paced courses by querying the local Git repo clone. :param cfg the loaded config. COURSE_REPO and SELF_PACED_PATH must be set :return: the names of all self-paced courses (as simple directory names) """ self_paced_path = cfg['SELF_PACED_PATH'] for rel_path in self_paced_path.split(':'): self_paced_dir = os.path.join(cfg['COURSE_REPO'], rel_path) if not os.path.isdir(self_paced_dir): debug(f'Directory "{self_paced_dir}" (in SELF_PACED_PATH) ' + 'does not exist.') continue for f in os.listdir(self_paced_dir): if f[0] == '.': continue full_path = os.path.join(self_paced_dir, f) if not os.path.isdir(full_path): continue for course_file in os.listdir(full_path): p = os.path.join(full_path, course_file) if os.path.isdir(p): continue _, ext = os.path.splitext(course_file) if course_file.startswith("build") and ext == '.yaml': yield f break def update_config(cfg: Dict[str, str]) -> Dict[str, str]: """ Update the configuration, setting values that depend on course name, which is assumed to be set in the configuration. :param cfg: current configuration :return: possibly adjusted configuration :raises CourseError: Configuration error. """ course = cfg.get('COURSE_NAME') if not course: return cfg from os.path import join, normpath adj = cfg.copy() repo = adj['COURSE_REPO'] self_paced = list(get_self_paced_courses(cfg)) prefix = 'Self-Paced' if course in self_paced else '' adj['PREFIX'] = prefix adj['COURSE_HOME'] = normpath(join(repo, 'courses', prefix, course)) if not adj.get('COURSE_YAML'): adj['COURSE_YAML'] = join(adj['COURSE_HOME'], 'build.yaml') adj['COURSE_MODULES'] = join(repo, 'modules', prefix, course) db_shard_home = adj.get('DB_SHARD_HOME') if not db_shard_home: # Let the databricks Workspace layer figure out the appropriate value # for home. try: w = databricks.Workspace(adj['DB_PROFILE']) db_shard_home = w.home except databricks.DatabricksError as e: # Ignore config errors. ~/.databrickscfg might not be there. if e.code != databricks.StatusCode.CONFIG_ERROR: raise if db_shard_home: adj['COURSE_REMOTE_SOURCE'] = f'{db_shard_home}/{adj["SOURCE"]}/{course}' adj['COURSE_REMOTE_TARGET'] = f'{db_shard_home}/{adj["TARGET"]}/{course}' return adj def check_config(cfg: Dict[str, str], *keys: str) -> NoReturn: """ Check the configuration, aborting if required items (COURSE_NAME and COURSE_REPO) are missing. Only useful for commands that actually do work. :param cfg: the configuration :param keys: the keys to require. If not specified, defaults to the ones listed above. :return: None :raises ConfigError: on error """ if not keys: keys = ('COURSE_NAME', 'COURSE_REPO') for key in keys: if not cfg.get(key): raise CourseError( f'{key} must be set, either in the environment or in the ' + 'configuration file.' ) def build_file_path(cfg: Dict[str, str]) -> NoReturn: """ Return the path to the build file for the current course. :param cfg: the configuration. At a minimum, "COURSE_HOME" must be set. COURSE_YAML is also examined. :return: the path to the build file """ res = cfg.get('COURSE_YAML') if res: if not os.path.sep in res: # Simple file name. Join it with the course home. 
res = os.path.join(cfg['COURSE_HOME'], res) else: res = os.path.join(cfg['COURSE_HOME'], 'build.yaml') return res def configure(cfg: Dict[str, str], config_path: str, key: str, value: str) -> Dict[str, str]: """ Add or update a key=value setting in both the in-memory configuration and the stored configuration. :param cfg: the in-memory config :param config_path: the path to the stored configuration :param key: the key to add or update :param value: the new value :return: the adjusted in-memory configuration, which is a copy of the one passed in """ new_cfg = cfg.copy() new_cfg[key] = value # Don't update from the in-memory config, because it might not match # what's in the file. (It can be modified on the fly, based on the command # line, and those ephemeral changes should not be saved.) stored_cfg = load_config(config_path, apply_defaults=False) stored_cfg[key] = value with open(config_path, 'w') as f: for k, v in sorted(stored_cfg.items()): f.write(f'{k}={v}\n') return new_cfg def work_on(cfg: Dict[str, str], course_name: str, config_path: str) -> Dict[str, str]: """ Change the course name in the configuration. Implicitly updates the in-memory configuration by calling update_config(). :param cfg: :param course_name: :param config_path: :return: """ return update_config( configure(cfg, config_path, 'COURSE_NAME', course_name) ) def clean(cfg: Dict[str, str]) -> NoReturn: """ The guts of the "clean" command, this function deletes the built (target) notebooks for current course from the remote Databricks instance. :param cfg: The config. COURSE_NAME, COURSE_REMOTE_TARGET, and DB_PROFILE are assumed to be set. :return: Nothing """ check_config(cfg) db_profile = cfg['DB_PROFILE'] remote_target = cfg['COURSE_REMOTE_TARGET'] # It's odd to ensure that the directory exists before removing it, but # it's easier (and costs no more time, really) than to issue a REST call # to check whether it exists in the first place. And "rm" will die if # called on a nonexistent remote path. w = databricks.Workspace(profile=db_profile) w.mkdirs(remote_target) w.rm(remote_target, recursive=True) def clean_source(cfg: Dict[str, str]) -> NoReturn: """ The guts of the "clean-source" command, this function deletes the source notebooks for current course from the remote Databricks instance. :param cfg: The config. COURSE_NAME, COURSE_REMOTE_SOURCE, and DB_PROFILE are assumed to be set. :return: Nothing """ check_config(cfg) db_profile = cfg['DB_PROFILE'] remote_source = cfg['COURSE_REMOTE_SOURCE'] w = databricks.Workspace(profile=db_profile) w.mkdirs(remote_source) w.rm(remote_source, recursive=True) def download(cfg: Dict[str, str]) -> NoReturn: """ Download the source notebooks for the current course from the Databricks instance and put them back into the local Git repository. Delegates the actual download to bdc. :param cfg: The config. COURSE_HOME (and, by implication, COURSE_NAME) and DB_PROFILE are assumed to be set. :return: Nothing """ check_config(cfg) db_profile = cfg['DB_PROFILE'] bdc.bdc_download(build_file=build_file_path(cfg), shard_path=cfg['COURSE_REMOTE_SOURCE'], databricks_profile=db_profile, verbose=False) def upload(cfg: Dict[str, str]) -> NoReturn: """ Upload the source notebooks for the current course from the local Git repository to the Databricks instance. Delegates the actual upload to bdc. :param cfg: The config. COURSE_HOME (and, by implication, COURSE_NAME), COURSE_REMOTE_SOURCE, and DB_PROFILE are assumed to be set. 
:return: Nothing """ check_config(cfg) db_profile = cfg['DB_PROFILE'] bdc.bdc_upload(build_file=build_file_path(cfg), shard_path=cfg['COURSE_REMOTE_SOURCE'], databricks_profile=db_profile, verbose=False) def import_dbcs(cfg: Dict[str, str], build_dir: str, build_file: str) -> NoReturn: """ Find all DBC files under the build output directory for the current course, and upload them (import them) into the Databricks instance. :param cfg: The config. COURSE_NAME, COURSE_REMOTE_TARGET, and DB_PROFILE are assumed to be set. :param build_dir: The path to the build directory. :return: NOthing """ check_config(cfg) remote_target = cfg['COURSE_REMOTE_TARGET'] db_profile = cfg['DB_PROFILE'] def import_dbc(dbc: str, build: bdc.BuildData) -> NoReturn: ''' Import a single DBC. Assumes (a) the working directory is the build directory, and (b) that the remote target path has already been created. ''' w = databricks.Workspace(profile=db_profile) if build.has_profiles: parent_subpath = os.path.dirname(dbc) dir_to_make = f'{remote_target}/{os.path.dirname(parent_subpath)}' w.mkdirs(dir_to_make) remote_path = f'{remote_target}/{parent_subpath}' else: remote_path = remote_target info(f'Importing "{dbc}" to "{remote_path}"...') w.import_dbc(dbc, remote_path) # Get the build information. We'll need it later. build = bdc.bdc_load_build(build_file) print(f'Importing all DBCs under "{build_dir}" to remote "{remote_target}"') dbcs = [] with working_directory(build_dir) as pwd: for dirpath, _, filenames in os.walk('.'): for filename in filenames: _, ext = os.path.splitext(filename) if ext != '.dbc': continue dbcs.append(os.path.normpath(os.path.join(dirpath, filename))) if not dbcs: warn('No DBCs found.') else: clean(cfg) w = databricks.Workspace(profile=db_profile) # If we're doing a profile-based build, create the remote target. # The import operations will implicitly create the remote # subfolders. However, if we're not doing profile-based builds, # then creating the remote target ahead of time will cause the # import to fail, so don't do that. if build.has_profiles: w.mkdirs(remote_target) for dbc in dbcs: info(f'\nIn "{pwd}":') import_dbc(dbc, build) def build_local(cfg: Dict[str, str]) -> str: """ Build a course without uploading the results. :param cfg: the loaded config :return: the path to the build file, for convenience """ check_config(cfg, 'COURSE_NAME', 'COURSE_REPO') course_name = cfg['COURSE_NAME'] build_file = build_file_path(cfg) if not os.path.exists(build_file): die('Build file "{}" does not exist.'.format(build_file)) print(f"\nBuilding {course_name} using {os.path.basename(build_file)}") bdc.bdc_build_course(build_file, dest_dir='', overwrite=True, verbose=False) return build_file def build_and_upload(cfg: Dict[str, str]) -> NoReturn: """ Build the current course and upload (import) the built artifacts to the Databricks instance. :param cfg: The config. COURSE_NAME, COURSE_REMOTE_TARGET, and DB_PROFILE are assumed to be set. :return: Nothing """ check_config(cfg) build_file = build_local(cfg) build_dir = bdc.bdc_output_directory_for_build(build_file) import_dbcs(cfg, build_dir, build_file) def upload_build(cfg: Dict[str, str]) -> NoReturn: """ Upload an already-built course. :param cfg: The config. COURSE_NAME, COURSE_REMOTE_TARGET, and DB_PROFILE are assumed to be set. :return: None """ check_config(cfg) build_file = build_file_path(cfg) build_dir = bdc.bdc_output_directory_for_build(build_file) import_dbcs(cfg, build_dir) def install_tools() -> NoReturn: """ Install the build tools. 
Doesn't work inside a Docker container. """ info('"course install-tools" is deprecated, because it is not possible ' 'to update a Docker container from within itself. To update the ' 'build tools, run the following command:\n\n' 'curl -L https://git.io/fhaLg | bash') def browse_directory(cfg: Dict[str, str], path: str, subcommand: str) -> NoReturn: """ Browse a directory, using whatever tool is configured as OPEN_DIR. Does not work inside Docker. :param cfg: the loaded configuration :param path: the path to the directory :param subcommand: the "course" subcommand, for errors. :return: Nothing. """ check_for_docker(subcommand) open_dir = cfg['OPEN_DIR'] cmd(f'{open_dir} "{path}"') def edit_file(cfg: Dict[str, str], path: str, subcommand: str) -> NoReturn: """ Edit a file, using the configured EDITOR. Works inside Docker, provided EDITOR is set to something installed in the Docker instance (such as "vim"). :param cfg: the loaded configuration :param path: the path to the file :param subcommand: the "course" subcommand, for errors. :return: Nothing """ check_config(cfg, 'COURSE_NAME', 'COURSE_REPO') editor = cfg['EDITOR'] cmd(f'{editor} "{path}"') def edit_config(cfg: Dict[str, str]) -> Dict[str, str]: """ Edit the "course" configuration file, using the configured EDITOR. Works inside Docker, provided EDITOR is set to something installed in the Docker instance (such as "vim"). Automatically reloads the configuration after the edit. :param cfg: the loaded configuration :return: The possibly modified configuration """ edit_file(cfg, CONFIG_PATH, 'config') return load_config(CONFIG_PATH, show_warnings=True) def git_status(cfg: Dict[str, str]) -> NoReturn: """ Runs a "git status" against the local Git repository. :param cfg: the loaded config. COURSE_REPO must be set. :return: Nothing """ course_repo = cfg['COURSE_REPO'] print(f'+ cd {course_repo}') with working_directory(course_repo): cmd("git status") def git_diff(cfg: Dict[str, str]) -> NoReturn: """ Runs a "git diff" against the local Git repository. :param cfg: the loaded config. COURSE_REPO must be set. PAGER will be used if it is set. :return: Nothing """ check_config(cfg, 'COURSE_REPO') course_repo = cfg['COURSE_REPO'] pager = cfg['PAGER'] with working_directory(course_repo): if not pager: cmd("git diff") else: cmd(f"git diff | {pager}") def git_difftool(cfg: Dict[str, str]) -> NoReturn: """ Runs a "git difftool", using "opendiff", against the local Git repository. Does not work inside a Docker container. :param cfg: the loaded config. COURSE_REPO must be set. :return: Nothing """ check_config(cfg, 'COURSE_REPO') course_repo = cfg['COURSE_REPO'] check_for_docker("difftool") with working_directory(course_repo): cmd("git difftool --tool=opendiff --no-prompt") def git_tag(cfg: Dict[str, str]) -> NoReturn: """ Tag the git repo and branch with the course name and version. :param cfg: the loaded config. COURSE_YAML must be set. :return: Nothing. """ build_file = build_file_path(cfg) bdc.bdc_create_git_tag(build_file) def deploy_images(cfg: Dict[str, str]) -> NoReturn: """ Deploy the images for a course to the appropriate S3 location. STUB. NOT CURRENTLY IMPLEMENTED. :param cfg: the loaded configuration :return: Nothing """ warn("'deploy-images' is not yet implemented.") def grep(cfg: Dict[str, str], pattern: str, case_blind: bool = False) -> NoReturn: """ Searches for the specified regular expression in every notebook within the current course, printing the colorized matches to standard output. 
If PAGER is set, the matches will be piped through the pager. Note that this function does NOT use grep(1). It implements the regular expression matching and colorization entirely within Python. :param cfg: The config. :param pattern: The regular expression (a string, not a compiled pattern) to find :param case_blind: Whether or not to use case-blind matching :return: Nothing """ def grep_one(path: str, r: Pattern, out: TextIO) -> NoReturn: home = os.environ['HOME'] if home: printable_path = os.path.join( '~', path[len(home)+1:] ) else: printable_path = path matches = [] with open(path) as f: for line in f.readlines(): m = r.search(line) if not m: continue # If there's a pager, colorize the match. if cfg.get('PAGER'): s = m.start() e = m.end() matches.append( line[:s] + colored(line[s:e], 'red', attrs=['bold']) + line[e:]) else: matches.append(line) if matches: out.write(f'\n\n=== {printable_path}\n\n') out.write(''.join(matches)) r = None try: flags = 0 if not case_blind else re.IGNORECASE r = re.compile(pattern, flags=flags) except Exception as e: die(f'Cannot compile regular expression "{pattern}": {e}') check_config(cfg, 'COURSE_NAME', 'COURSE_REPO') with pager(cfg) as out: for nb in bdc.bdc_get_notebook_paths(build_file_path(cfg)): grep_one(nb, r, out) def sed(cfg: Dict[str, str], sed_cmd: str) -> NoReturn: """ Runs an in-place "sed" edit against every notebook in the course, using "sed -E". Requires a version of "sed" that supports the "-i" (in-place edit) option. :param cfg: the loaded configuration :param sed_cmd: the "sed" command, which may or may not be quoted. :return: Nothing """ check_config(cfg, 'COURSE_NAME', 'COURSE_REPO') for nb in bdc.bdc_get_notebook_paths(build_file_path(cfg)): # Quote the argument. q = sed_cmd[0] if q in ('"', "'"): # Already quoted, hopefully. if sed_cmd[-1] != q: raise CourseError( f'Mismatched quotes in sed argument: {sed_cmd}' ) quoted = sed_cmd elif ('"' in sed_cmd) and ("'" in sed_cmd): raise CourseError( '"sed" argument cannot be quoted, since it contains ' + f'single AND double quotes: {sed_cmd}' ) elif "'" in sed_cmd: quoted = '"' + sed_cmd + '"' else: quoted = "'" + sed_cmd + "'" cmd(f'sed -E -i "" -e {quoted} "{nb}"') def run_command_on_notebooks(cfg: Dict[str, str], command: str, args: Sequence[str]) -> NoReturn: """ Runs a command on every notebook in the current course. :param cfg: the loaded configuration. 
:param command: the command to run :param args: any command arguments, as a list :return: Nothing """ check_config(cfg, 'COURSE_NAME', 'COURSE_REPO') for nb in bdc.bdc_get_notebook_paths(build_file_path(cfg)): if args: quoted = ' '.join([quote_shell_arg(arg) for arg in args]) shell_command = f'{command} {quoted} {nb}' else: shell_command = f'{command} {nb}' try: cmd(shell_command) except CourseError as e: warn(str(e)) def help(cfg: Dict[str, str]) -> NoReturn: with pager(cfg) as out: out.write(USAGE) def print_tool_versions() -> NoReturn: import gendbc import master_parse import db_edu_util from databricks_cli.version import version as dbcli_version print(f"course: {VERSION}") print(f"bdc: {bdc.VERSION}") print(f"gendbc: {gendbc.VERSION}") print(f"master_parse: {master_parse.VERSION}") print(f"db_edu_util (library): {db_edu_util.VERSION}") print(f"databricks: {dbcli_version}") def which(cfg: Dict[str, str]) -> NoReturn: course_name = cfg.get('COURSE_NAME') if course_name: print(cfg['COURSE_NAME']) else: print('No course has been set.') # ----------------------------------------------------------------------------- # Main program # ----------------------------------------------------------------------------- def main(): if os.environ.get('COURSE_DEBUG', 'false') == 'true': set_debug(True) try: # Load the configuration and then run it through update_config() to # ensure that course name-related settings are updated, if necessary. cfg = update_config(load_config(CONFIG_PATH, show_warnings=True)) # Update the environment, for subprocesses we need to invoke. os.environ['EDITOR'] = cfg['EDITOR'] os.environ['PAGER'] = cfg['PAGER'] # Loop over the argument list, since we need to support chaining some # commands (e.g., "course download build"). This logic emulates # what was in the original shell script version, and it's not easily # handled by Python's argparse or docopt. So, ugly as it is, we go # with manual parsing. if len(sys.argv) == 1: args = ["help"] else: args = sys.argv[1:] i = 0 while i < len(args): cmd = args[i] if cmd in ('--version', '-V'): print(VERSION) break if cmd in ('toolversions', 'tool-versions'): print_tool_versions() break if cmd in ('-n', '--name'): try: i += 1 # Changing the name of the course has to reset the # build.yaml name. 
del cfg['COURSE_YAML'] cfg['COURSE_NAME'] = args[i] cfg = update_config(cfg) except IndexError: die("Saw -n or --name without subsequent course name.") elif cmd in ('-f', '--build-file'): try: i += 1 cfg['COURSE_YAML'] = args[i] cfg = update_config(cfg) except IndexError: die("Saw -f or --build-file without subsequent file name.") elif cmd in ('-h', '--help', 'help', 'usage'): help(cfg) break elif cmd in ('work-on', 'workon'): try: i += 1 cfg = work_on(cfg, args[i], CONFIG_PATH) except IndexError: die('Expected course name after "work-on".') elif cmd == 'tag': git_tag(cfg) elif cmd == 'which': which(cfg) elif cmd in ('install-tools', 'installtools'): install_tools() elif cmd == 'download': download(cfg) elif cmd == 'upload': upload(cfg) elif cmd in ('upload-built', 'uploadbuilt'): upload_build(cfg) elif cmd == 'build': build_and_upload(cfg) elif cmd in ('build-local', 'buildlocal'): build_local(cfg) elif cmd == 'clean': clean(cfg) elif cmd in ('clean-source', 'cleansource'): clean_source(cfg) elif cmd in ('deploy-images', 'deployimages'): deploy_images(cfg) elif cmd == 'status': git_status(cfg) elif cmd == 'diff': git_diff(cfg) elif cmd == 'difftool': git_difftool(cfg) elif cmd == 'home': browse_directory(cfg, cfg['COURSE_HOME'], 'home') elif cmd == 'modules': browse_directory(cfg, cfg['COURSE_MODULES'], 'modules') elif cmd == 'repo': browse_directory(cfg, cfg['COURSE_REPO'], 'repo') elif cmd == 'config': cfg = edit_config(cfg) elif cmd == 'yaml': edit_file(cfg, build_file_path(cfg), 'yaml') elif cmd == 'guide': edit_file(cfg, os.path.join(cfg['COURSE_HOME'], 'Teaching-Guide.md'), 'guide') elif cmd == ('deploy-images', 'deployimages'): deploy_images(cfg) elif cmd == 'grep': try: i += 1 pattern = args[i] if pattern == '-i': case_blind = True i += 1 pattern = args[i] else: case_blind = False grep(cfg, pattern, case_blind) except IndexError: die('Missing grep argument(s).') elif cmd == 'sed': try: i += 1 sed(cfg, args[i]) except IndexError: die('Missing sed argument.') elif cmd == 'xargs': # All the remaining arguments go to the command. try: i += 1 command = args[i] if i < len(args): i += 1 command_args = args[i:] else: command_args = [] run_command_on_notebooks(cfg, command, command_args) break except IndexError: die('Missing command to run.') elif cmd == 'set': try: i += 1 setting = args[i] fields = setting.split('=') if len(fields) != 2: die('Argument to "set" must be of the form CONF=VAL.') key, value = fields value = value.replace('"', '') cfg = configure(cfg, CONFIG_PATH, key, value) except IndexError: die('Missing CONF=VAL argument to "set".') elif cmd == "showconfig": hdr = "Current configuration" print('-' * len(hdr)) print(hdr) print('-' * len(hdr)) for key in sorted(cfg.keys()): print(f'{key}="{cfg[key]}"') else: die(f'"{cmd}" is not a valid "course" subcommand.') i += 1 except CourseError as e: error(str(e)) except bdc.BDCError as e: error(str(e)) except KeyboardInterrupt: error('\n*** Interrupted.') if __name__ == '__main__': main()
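
# --- Illustrative sketch (not part of the original tool) ---------------------
# load_config() above reads a simple KEY=VALUE file, lets the environment
# override entries, and fills remaining gaps from defaults expanded with
# string.Template so a default can refer to earlier settings. A stripped-down,
# self-contained version of that idea is sketched here; the helper name
# load_simple_config and the example keys are hypothetical.

import os
import re
from string import Template


def load_simple_config(path, defaults):
    """defaults: ordered list of (key, default_or_None) pairs."""
    comment = re.compile(r'^\s*#')
    cfg = {}
    if os.path.exists(path):
        with open(path) as f:
            for line in f:
                line = line.rstrip()
                if not line.strip() or comment.search(line):
                    continue
                key, _, value = line.partition('=')
                cfg[key] = value
    for key, default in defaults:
        env_value = os.environ.get(key)
        if env_value is not None:
            cfg[key] = env_value
        elif cfg.get(key) is None and default is not None:
            # Defaults may reference keys resolved earlier in the list,
            # e.g. ('COURSE_YAML', '$COURSE_HOME/build.yaml').
            cfg[key] = Template(default).substitute(cfg)
    return cfg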
import numpy as np from scipy.special import expi, gammainc from .ssp_basis import SSPBasis __all__ = ["CSPBasis", "StepSFHBasis", "CompositeSFH", "LinearSFHBasis"] # change base from .constants import loge class CSPBasis(object): """ A class for composite stellar populations, which can be composed from multiple versions of parameterized SFHs. Deprecated, Use CSPSpecBasis instead. """ def __init__(self, compute_vega_mags=False, zcontinuous=1, vactoair_flag=False, **kwargs): # This is a StellarPopulation object from fsps self.csp = fsps.StellarPopulation(compute_vega_mags=compute_vega_mags, zcontinuous=zcontinuous, vactoair_flag=vactoair_flag) self.params = {} def get_spectrum(self, outwave=None, filters=None, peraa=False, **params): """Given a theta vector, generate spectroscopy, photometry and any extras (e.g. stellar mass). :param theta: ndarray of parameter values. :param sps: A python-fsps StellarPopulation object to be used for generating the SED. :returns spec: The restframe spectrum in units of maggies. :returns phot: The apparent (redshifted) observed frame maggies in each of the filters. :returns extras: A list of the ratio of existing stellar mass to total mass formed for each component, length ncomp. """ self.params.update(**params) # Pass the model parameters through to the sps object ncomp = len(self.params['mass']) for ic in range(ncomp): s, p, x = self.one_sed(component_index=ic, filterlist=filters) try: spec += s maggies += p extra += [x] except(NameError): spec, maggies, extra = s, p, [x] # `spec` is now in Lsun/Hz, with the wavelength array being the # observed frame wavelengths. Flux array (and maggies) have not been # increased by (1+z) due to cosmological redshift w = self.ssp.wavelengths if outwave is not None: spec = np.interp(outwave, w, spec) else: outwave = w # Distance dimming and unit conversion zred = self.params.get('zred', 0.0) if (zred == 0) or ('lumdist' in self.params): # Use 10pc for the luminosity distance (or a number provided in the # lumdist key in units of Mpc). Do not apply cosmological (1+z) # factor to the flux. dfactor = (self.params.get('lumdist', 1e-5) * 1e5)**2 a = 1.0 else: # Use the comsological luminosity distance implied by this # redshift. Cosmological (1+z) factor on the flux was already done in one_sed lumdist = cosmo.luminosity_distance(zred).value dfactor = (lumdist * 1e5)**2 if peraa: # spectrum will be in erg/s/cm^2/AA spec *= to_cgs / dfactor * lightspeed / outwave**2 else: # Spectrum will be in maggies spec *= to_cgs / dfactor / (3631*jansky_cgs) # Convert from absolute maggies to apparent maggies maggies /= dfactor return spec, maggies, extra def one_sed(self, component_index=0, filterlist=[]): """Get the SED of one component for a multicomponent composite SFH. Should set this up to work as an iterator. :param component_index: Integer index of the component to calculate the SED for. :param filterlist: A list of strings giving the (FSPS) names of the filters onto which the spectrum will be projected. :returns spec: The restframe spectrum in units of Lsun/Hz. :returns maggies: Broadband fluxes through the filters named in ``filterlist``, ndarray. Units are observed frame absolute maggies: M = -2.5 * log_{10}(maggies). :returns extra: The extra information corresponding to this component. 
""" # Pass the model parameters through to the sps object, and keep track # of the mass of this component mass = 1.0 for k, vs in list(self.params.items()): try: v = vs[component_index] except(IndexError, TypeError): v = vs if k in self.csp.params.all_params: self.csp.params[k] = deepcopy(v) if k == 'mass': mass = v # Now get the spectrum. The spectrum is in units of # Lsun/Hz/per solar mass *formed*, and is restframe w, spec = self.csp.get_spectrum(tage=self.csp.params['tage'], peraa=False) # redshift and get photometry. Note we are boosting fnu by (1+z) *here* a, b = (1 + self.csp.params['zred']), 0.0 wa, sa = w * (a + b), spec * a # Observed Frame if filterlist is not None: mags = getSED(wa, lightspeed/wa**2 * sa * to_cgs, filterlist) phot = np.atleast_1d(10**(-0.4 * mags)) else: phot = 0.0 # now some mass normalization magic mfrac = self.csp.stellar_mass if np.all(self.params.get('mass_units', 'mstar') == 'mstar'): # Convert input normalization units from per stellar masss to per mass formed mass /= mfrac # Output correct units return mass * sa, mass * phot, mfrac class StepSFHBasis(SSPBasis): """Subclass of SSPBasis that computes SSP weights for piecewise constant SFHs (i.e. a binned SFH). The parameters for this SFH are: * `agebins` - array of shape (nbin, 2) giving the younger and older (in lookback time) edges of each bin. If `interp_type` is `"linear"', these are assumed to be in years. Otherwise they are in log10(years) * `mass` - array of shape (nbin,) giving the total surviving stellar mass (in solar masses) in each bin, unless the `mass_units` parameter is set to something different `"mstar"`, in which case the units are assumed to be total stellar mass *formed* in each bin. The `agebins` parameter *must not be changed* without also setting `self._ages=None`. """ @property def all_ssp_weights(self): # Cache age bins and relative weights. This means params['agebins'] # *must not change* without also setting _ages = None if getattr(self, '_ages', None) is None: self._ages = self.params['agebins'] nbin, nssp = len(self._ages), len(self.logage) + 1 self._bin_weights = np.zeros([nbin, nssp]) for i, (t1, t2) in enumerate(self._ages): # These *should* sum to one (or zero) for each bin self._bin_weights[i, :] = self.bin_weights(t1, t2) # Now normalize the weights in each bin by the mass parameter, and sum # over bins. bin_masses = self.params['mass'] if np.all(self.params.get('mass_units', 'mformed') == 'mstar'): # Convert from mstar to mformed for each bin. We have to do this # here as well as in get_spectrum because the *relative* # normalization in each bin depends on the units, as well as the # overall normalization. bin_masses /= self.bin_mass_fraction w = (bin_masses[:, None] * self._bin_weights).sum(axis=0) return w @property def bin_mass_fraction(self): """Return the ratio m_star(surviving) / m_formed for each bin. """ try: mstar = self.ssp_stellar_masses w = self._bin_weights bin_mfrac = (mstar[None, :] * w).sum(axis=-1) / w.sum(axis=-1) return bin_mfrac except(AttributeError): print('agebin info or ssp masses not chached?') return 1.0 def bin_weights(self, amin, amax): """Compute normalizations required to get a piecewise constant SFH within an age bin. This is super complicated and obscured. The output weights are such that one solar mass will have formed during the bin (i.e. SFR = 1/(amax-amin)) This computes weights using \int_tmin^tmax dt (\log t_i - \log t) / (\log t_{i+1} - \log t_i) but see sfh.tex for the detailed calculation and the linear time interpolation case. 
""" if self.interp_type == 'linear': sspages = np.insert(10**self.logage, 0, 0) func = constant_linear mass = amax - amin elif self.interp_type == 'logarithmic': sspages = np.insert(self.logage, 0, self.mint_log) func = constant_logarithmic mass = 10**amax - 10**amin assert amin >= sspages[0] assert amax <= sspages.max() # below could be done by using two separate dt vectors instead of two # age vectors ages = np.array([sspages[:-1], sspages[1:]]) dt = np.diff(ages, axis=0) tmin, tmax = np.clip(ages, amin, amax) # get contributions from SSP sub-bin to the left and from SSP sub-bin # to the right left, right = (func(ages, tmax) - func(ages, tmin)) / dt # put into full array ww = np.zeros(len(sspages)) ww[:-1] += right # last element has no sub-bin to the right ww[1:] += -left # need to flip sign # normalize to 1 solar mass formed and return return ww / mass class CompositeSFH(SSPBasis): """Subclass of SSPBasis that computes SSP weights for a parameterized SF. The parameters for this SFH are: * `sfh_type` - String of "delaytau", "tau", "simha" * `tage`, `sf_trunc`, `sf_slope`, `const`, `fburst`, `tau` * `mass` - """ def configure(self): """This reproduces FSPS-like combinations of SFHs. Note that the *same* parameter set is passed to each component in the combination """ sfhs = [self.sfh_type] limits = len(sfhs) * ['regular'] if 'simha' in self.sfh_type: sfhs = ['delaytau', 'linear'] limits = ['regular', 'simha'] fnames = ['{0}_{1}'.format(f, self.interp_type) for f in sfhs] lnames = ['{}_limits'.format(f) for f in limits] self.funcs = [globals()[f] for f in fnames] self.limits = [globals()[f] for f in lnames] if self.interp_type == 'linear': sspages = np.insert(10**self.logage, 0, 0) elif self.interp_type == 'logarithmic': sspages = np.insert(self.logage, 0, self.mint_log) self.ages = np.array([sspages[:-1], sspages[1:]]) self.dt = np.diff(self.ages, axis=0) @property def _limits(self): pass @property def _funcs(self): pass @property def all_ssp_weights(self): # Full output weight array. We keep separate vectors for each # component so we can renormalize after the loop, but for many # components it would be better to renormalize and sum within the loop ww = np.zeros([len(self.funcs), self.ages.shape[-1] + 1]) # Loop over components. 
Note we are sending the same params to every component for i, (limit, func) in enumerate(zip(self.limits, self.funcs)): ww[i, :] = self.ssp_weights(func, limit, self.params) # renormalize each component to 1 Msun assert np.all(ww >= 0) wsum = ww.sum(axis=1) # unless truly no SF in the component if 0 in wsum: wsum[wsum == 0] = 1.0 ww /= wsum[:, None] # apply relative normalizations ww *= self.normalizations(**self.params)[:, None] # And finally add all components together and renormalize again to # 1Msun and return return ww.sum(axis=0) / ww.sum() def ssp_weights(self, integral, limit_function, params, **extras): # build full output weight vector ww = np.zeros(self.ages.shape[-1] + 1) tmin, tmax = limit_function(self.ages, mint_log=self.mint_log, interp_type=self.interp_type, **params) left, right = (integral(self.ages, tmax, **params) - integral(self.ages, tmin, **params)) / self.dt # Put into full array, shifting the `right` terms by 1 element ww[:-1] += right # last SSP has no sub-bin to the right ww[1:] += -left # need to flip sign # Note that now ww[i,1] = right[1] - left[0], where # left[0] is the integral from tmin,0 to tmax,0 of # SFR(t) * (sspages[0] - t)/(sspages[1] - sspages[0]) and # right[1] is the integral from tmin,1 to tmax,1 of # SFR(t) * (sspages[2] - t)/(sspages[2] - sspages[1]) return ww def normalizations(self, tage=0., sf_trunc=0, sf_slope=0, const=0, fburst=0, tau=0., **extras): if (sf_trunc <= 0) or (sf_trunc > tage): Tmax = tage else: Tmax = sf_trunc # Tau models. SFH=1 -> power=1; SFH=4,5 -> power=2 if ('delay' in self.sfh_type) or ('simha' in self.sfh_type): power = 2. else: power = 1. mass_tau = tau * gammainc(power, Tmax/tau) if 'simha' not in self.sfh_type: return np.array([mass_tau]) # SFR at Tmax sfr_q = (Tmax/tau)**(power-1) * np.exp(-Tmax/tau) # linear. integral of (1 - m * (T - Tmax)) from Tmax to Tzero if sf_slope == 0.: Tz = tage else: Tz = Tmax + 1/np.float64(sf_slope) if (Tz < Tmax) or (Tz > tage) or (not np.isfinite(Tz)): Tz = tage m = sf_slope mass_linear = (Tz - Tmax) - m/2.*(Tz**2 + Tmax**2) + m*Tz*Tmax # normalize the linear portion relative to the tau portion norms = np.array([1, mass_linear * sfr_q / mass_tau]) norms /= norms.sum() # now add in constant and burst if (const > 0) or (fburst > 0): norms = (1-fburst-const) * norms norms.tolist().extend([const, fburst]) return np.array(norms) class LinearSFHBasis(SSPBasis): """Subclass of SSPBasis that computes SSP weights for piecewise linear SFHs (i.e. a linearly interpolated tabular SFH). The parameters for this SFH are: * `ages` - array of shape (ntab,) giving the lookback time of each tabulated SFR. If `interp_type` is `"linear"', these are assumed to be in years. Otherwise they are in log10(years) * `sfr` - array of shape (ntab,) giving the SFR (in Msun/yr) * `logzsol` * `dust2` """ def get_galaxy_spectrum(self): raise(NotImplementedError) def regular_limits(ages, tage=0., sf_trunc=0., mint_log=-3, interp_type='logarithmic', **extras): # get the truncation time in units of lookback time if (sf_trunc <= 0) or (sf_trunc > tage): tq = 0 else: tq = tage - sf_trunc if interp_type == 'logarithmic': tq = np.log10(np.max([tq, 10**mint_log])) tage = np.log10(np.max([tage, 10**mint_log])) return np.clip(ages, tq, tage) def simha_limits(ages, tage=0., sf_trunc=0, sf_slope=0., mint_log=-3, interp_type='logarithmic', **extras): # get the truncation time in units of lookback time if (sf_trunc <= 0) or (sf_trunc > tage): tq = 0 else: tq = tage - sf_trunc t0 = tq - 1. 
/ np.float64(sf_slope) if (t0 > tq) or (t0 <= 0) or (not np.isfinite(t0)): t0 = 0. if interp_type == 'logarithmic': tq = np.log10(np.max([tq, 10**mint_log])) t0 = np.log10(np.max([t0, 10**mint_log])) return np.clip(ages, t0, tq) def constant_linear(ages, t, **extras): """Indefinite integral for SFR = 1 :param ages: Linear age(s) of the SSPs. :param t: Linear time at which to evaluate the indefinite integral """ return ages * t - t**2 / 2 def constant_logarithmic(logages, logt, **extras): """SFR = 1 """ t = 10**logt return t * (logages - logt + loge) def tau_linear(ages, t, tau=None, **extras): """SFR = e^{(tage-t)/\tau} """ return (ages - t + tau) * np.exp(t / tau) def tau_logarithmic(logages, logt, tau=None, **extras): """SFR = e^{(tage-t)/\tau} """ tprime = 10**logt / tau return (logages - logt) * np.exp(tprime) + loge * expi(tprime) def delaytau_linear(ages, t, tau=None, tage=None, **extras): """SFR = (tage-t) * e^{(tage-t)/\tau} """ bracket = tage * ages - (tage + ages)*(t - tau) + t**2 - 2*t*tau + 2*tau**2 return bracket * np.exp(t / tau) def delaytau_logarithmic(logages, logt, tau=None, tage=None, **extras): """SFR = (tage-t) * e^{(tage-t)/\tau} """ t = 10**logt tprime = t / tau a = (t - tage - tau) * (logt - logages) - tau * loge b = (tage + tau) * loge return a * np.exp(tprime) + b * expi(tprime) def linear_linear(ages, t, tage=None, sf_trunc=0, sf_slope=0., **extras): """SFR = [1 - sf_slope * (tage-t)] """ tq = np.max([0, tage-sf_trunc]) k = 1 - sf_slope * tq return k * ages * t + (sf_slope*ages - k) * t**2 / 2 - sf_slope * t**3 / 3 def linear_logarithmic(logages, logt, tage=None, sf_trunc=0, sf_slope=0., **extras): """SFR = [1 - sf_slope * (tage-t)] """ tq = np.max([0, tage-sf_trunc]) t = 10**logt k = 1 - sf_slope * tq term1 = k * t * (logages - logt + loge) term2 = sf_slope * t**2 / 2 * (logages - logt + loge / 2) return term1 + term2 def burst_linear(ages, t, tburst=None, **extras): """Burst. SFR = \delta(t-t_burst) """ return ages - tburst def burst_logarithmic(logages, logt, tburst=None, **extras): """Burst. SFR = \delta(t-t_burst) """ return logages - np.log10(tburst)
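
# --- Illustrative sketch (not part of the original module) -------------------
# The *_linear helpers above are indefinite integrals in t of
# SFR(t) * (age - t); for example constant_linear(age, t) = age*t - t**2/2 is
# the antiderivative of (age - t) for SFR = 1. The quick numerical sanity
# check below exercises constant_linear() with a midpoint-rule quadrature; the
# helper name _check_constant_linear and the sample ages are assumptions made
# only for this sketch.


def _check_constant_linear(age=1.0e9, t1=2.0e8, t2=6.0e8, n=100000):
    import numpy as np
    dt = (t2 - t1) / n
    midpoints = t1 + (np.arange(n) + 0.5) * dt
    numeric = np.sum(age - midpoints) * dt       # midpoint-rule integral
    analytic = constant_linear(age, t2) - constant_linear(age, t1)
    return np.allclose(numeric, analytic, rtol=1e-6)


if __name__ == '__main__':
    assert _check_constant_linear()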
"""Channels module for Zigbee Home Automation.""" import asyncio from typing import Any, Dict, List, Optional, Tuple, Union import zigpy.zcl.clusters.closures from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_send from . import ( # noqa: F401 # pylint: disable=unused-import base, closures, general, homeautomation, hvac, lighting, lightlink, manufacturerspecific, measurement, protocol, security, smartenergy, ) from .. import ( const, device as zha_core_device, discovery as zha_disc, registries as zha_regs, typing as zha_typing, ) ChannelsDict = Dict[str, zha_typing.ChannelType] class Channels: """All discovered channels of a device.""" def __init__(self, zha_device: zha_typing.ZhaDeviceType) -> None: """Initialize instance.""" self._pools: List[zha_typing.ChannelPoolType] = [] self._power_config = None self._identify = None self._semaphore = asyncio.Semaphore(3) self._unique_id = str(zha_device.ieee) self._zdo_channel = base.ZDOChannel(zha_device.device.endpoints[0], zha_device) self._zha_device = zha_device @property def pools(self) -> List["ChannelPool"]: """Return channel pools list.""" return self._pools @property def power_configuration_ch(self) -> zha_typing.ChannelType: """Return power configuration channel.""" return self._power_config @power_configuration_ch.setter def power_configuration_ch(self, channel: zha_typing.ChannelType) -> None: """Power configuration channel setter.""" if self._power_config is None: self._power_config = channel @property def identify_ch(self) -> zha_typing.ChannelType: """Return power configuration channel.""" return self._identify @identify_ch.setter def identify_ch(self, channel: zha_typing.ChannelType) -> None: """Power configuration channel setter.""" if self._identify is None: self._identify = channel @property def semaphore(self) -> asyncio.Semaphore: """Return semaphore for concurrent tasks.""" return self._semaphore @property def zdo_channel(self) -> zha_typing.ZDOChannelType: """Return ZDO channel.""" return self._zdo_channel @property def zha_device(self) -> zha_typing.ZhaDeviceType: """Return parent zha device.""" return self._zha_device @property def unique_id(self): """Return the unique id for this channel.""" return self._unique_id @property def zigbee_signature(self) -> Dict[int, Dict[str, Any]]: """Get the zigbee signatures for the pools in channels.""" return { signature[0]: signature[1] for signature in [pool.zigbee_signature for pool in self.pools] } @classmethod def new(cls, zha_device: zha_typing.ZhaDeviceType) -> "Channels": """Create new instance.""" channels = cls(zha_device) for ep_id in sorted(zha_device.device.endpoints): channels.add_pool(ep_id) return channels def add_pool(self, ep_id: int) -> None: """Add channels for a specific endpoint.""" if ep_id == 0: return self._pools.append(ChannelPool.new(self, ep_id)) async def async_initialize(self, from_cache: bool = False) -> None: """Initialize claimed channels.""" await self.zdo_channel.async_initialize(from_cache) self.zdo_channel.debug("'async_initialize' stage succeeded") await asyncio.gather( *(pool.async_initialize(from_cache) for pool in self.pools) ) async def async_configure(self) -> None: """Configure claimed channels.""" await self.zdo_channel.async_configure() self.zdo_channel.debug("'async_configure' stage succeeded") await asyncio.gather(*(pool.async_configure() for pool in self.pools)) @callback def async_new_entity( self, component: str, entity_class: zha_typing.CALLABLE_T, unique_id: str, channels: 
List[zha_typing.ChannelType], ): """Signal new entity addition.""" if self.zha_device.status == zha_core_device.DeviceStatus.INITIALIZED: return self.zha_device.hass.data[const.DATA_ZHA][component].append( (entity_class, (unique_id, self.zha_device, channels)) ) @callback def async_send_signal(self, signal: str, *args: Any) -> None: """Send a signal through hass dispatcher.""" async_dispatcher_send(self.zha_device.hass, signal, *args) @callback def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None: """Relay events to hass.""" self.zha_device.hass.bus.async_fire( "zha_event", { const.ATTR_DEVICE_IEEE: str(self.zha_device.ieee), const.ATTR_UNIQUE_ID: self.unique_id, **event_data, }, ) class ChannelPool: """All channels of an endpoint.""" def __init__(self, channels: Channels, ep_id: int): """Initialize instance.""" self._all_channels: ChannelsDict = {} self._channels: Channels = channels self._claimed_channels: ChannelsDict = {} self._id: int = ep_id self._client_channels: Dict[str, zha_typing.ClientChannelType] = {} self._unique_id: str = f"{channels.unique_id}-{ep_id}" @property def all_channels(self) -> ChannelsDict: """All server channels of an endpoint.""" return self._all_channels @property def claimed_channels(self) -> ChannelsDict: """Channels in use.""" return self._claimed_channels @property def client_channels(self) -> Dict[str, zha_typing.ClientChannelType]: """Return a dict of client channels.""" return self._client_channels @property def endpoint(self) -> zha_typing.ZigpyEndpointType: """Return endpoint of zigpy device.""" return self._channels.zha_device.device.endpoints[self.id] @property def id(self) -> int: """Return endpoint id.""" return self._id @property def nwk(self) -> int: """Device NWK for logging.""" return self._channels.zha_device.nwk @property def is_mains_powered(self) -> bool: """Device is_mains_powered.""" return self._channels.zha_device.is_mains_powered @property def manufacturer(self) -> Optional[str]: """Return device manufacturer.""" return self._channels.zha_device.manufacturer @property def manufacturer_code(self) -> Optional[int]: """Return device manufacturer.""" return self._channels.zha_device.manufacturer_code @property def hass(self): """Return hass.""" return self._channels.zha_device.hass @property def model(self) -> Optional[str]: """Return device model.""" return self._channels.zha_device.model @property def skip_configuration(self) -> bool: """Return True if device does not require channel configuration.""" return self._channels.zha_device.skip_configuration @property def unique_id(self): """Return the unique id for this channel.""" return self._unique_id @property def zigbee_signature(self) -> Tuple[int, Dict[str, Any]]: """Get the zigbee signature for the endpoint this pool represents.""" return ( self.endpoint.endpoint_id, { const.ATTR_PROFILE_ID: self.endpoint.profile_id, const.ATTR_DEVICE_TYPE: f"0x{self.endpoint.device_type:04x}" if self.endpoint.device_type is not None else "", const.ATTR_IN_CLUSTERS: [ f"0x{cluster_id:04x}" for cluster_id in sorted(self.endpoint.in_clusters) ], const.ATTR_OUT_CLUSTERS: [ f"0x{cluster_id:04x}" for cluster_id in sorted(self.endpoint.out_clusters) ], }, ) @classmethod def new(cls, channels: Channels, ep_id: int) -> "ChannelPool": """Create new channels for an endpoint.""" pool = cls(channels, ep_id) pool.add_all_channels() pool.add_client_channels() zha_disc.PROBE.discover_entities(pool) return pool @callback def add_all_channels(self) -> None: """Create and add channels for all input 
clusters.""" for cluster_id, cluster in self.endpoint.in_clusters.items(): channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get( cluster_id, base.ZigbeeChannel ) # really ugly hack to deal with xiaomi using the door lock cluster # incorrectly. if ( hasattr(cluster, "ep_attribute") and cluster_id == zigpy.zcl.clusters.closures.DoorLock.cluster_id and cluster.ep_attribute == "multistate_input" ): channel_class = general.MultistateInput # end of ugly hack channel = channel_class(cluster, self) if channel.name == const.CHANNEL_POWER_CONFIGURATION: if ( self._channels.power_configuration_ch or self._channels.zha_device.is_mains_powered ): # on power configuration channel per device continue self._channels.power_configuration_ch = channel elif channel.name == const.CHANNEL_IDENTIFY: self._channels.identify_ch = channel self.all_channels[channel.id] = channel @callback def add_client_channels(self) -> None: """Create client channels for all output clusters if in the registry.""" for cluster_id, channel_class in zha_regs.CLIENT_CHANNELS_REGISTRY.items(): cluster = self.endpoint.out_clusters.get(cluster_id) if cluster is not None: channel = channel_class(cluster, self) self.client_channels[channel.id] = channel async def async_initialize(self, from_cache: bool = False) -> None: """Initialize claimed channels.""" await self._execute_channel_tasks("async_initialize", from_cache) async def async_configure(self) -> None: """Configure claimed channels.""" await self._execute_channel_tasks("async_configure") async def _execute_channel_tasks(self, func_name: str, *args: Any) -> None: """Add a throttled channel task and swallow exceptions.""" async def _throttle(coro): async with self._channels.semaphore: return await coro channels = [*self.claimed_channels.values(), *self.client_channels.values()] tasks = [_throttle(getattr(ch, func_name)(*args)) for ch in channels] results = await asyncio.gather(*tasks, return_exceptions=True) for channel, outcome in zip(channels, results): if isinstance(outcome, Exception): channel.warning("'%s' stage failed: %s", func_name, str(outcome)) continue channel.debug("'%s' stage succeeded", func_name) @callback def async_new_entity( self, component: str, entity_class: zha_typing.CALLABLE_T, unique_id: str, channels: List[zha_typing.ChannelType], ): """Signal new entity addition.""" self._channels.async_new_entity(component, entity_class, unique_id, channels) @callback def async_send_signal(self, signal: str, *args: Any) -> None: """Send a signal through hass dispatcher.""" self._channels.async_send_signal(signal, *args) @callback def claim_channels(self, channels: List[zha_typing.ChannelType]) -> None: """Claim a channel.""" self.claimed_channels.update({ch.id: ch for ch in channels}) @callback def unclaimed_channels(self) -> List[zha_typing.ChannelType]: """Return a list of available (unclaimed) channels.""" claimed = set(self.claimed_channels) available = set(self.all_channels) return [self.all_channels[chan_id] for chan_id in (available - claimed)] @callback def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None: """Relay events to hass.""" self._channels.zha_send_event( { const.ATTR_UNIQUE_ID: self.unique_id, const.ATTR_ENDPOINT_ID: self.id, **event_data, } )
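# --- Hedged illustration (not part of the ZHA integration above) ---
# _execute_channel_tasks() above throttles channel initialization with a shared
# semaphore and collects per-channel failures via
# asyncio.gather(return_exceptions=True). The standalone sketch below shows the
# same pattern outside Home Assistant; `fake_channel_task` and LIMIT are
# illustrative names, not part of the ZHA API.
import asyncio

LIMIT = 3  # maximum number of coroutines allowed to run at once


async def fake_channel_task(idx: int) -> str:
    """Stand-in for a channel's async_initialize/async_configure call."""
    await asyncio.sleep(0.01)
    if idx == 2:
        raise RuntimeError(f"channel {idx} failed")
    return f"channel {idx} ok"


async def run_throttled() -> None:
    semaphore = asyncio.Semaphore(LIMIT)

    async def _throttle(coro):
        # Only LIMIT coroutines proceed concurrently; the rest wait here.
        async with semaphore:
            return await coro

    tasks = [_throttle(fake_channel_task(i)) for i in range(5)]
    # return_exceptions=True keeps one failing task from cancelling the rest.
    results = await asyncio.gather(*tasks, return_exceptions=True)
    for idx, outcome in enumerate(results):
        if isinstance(outcome, Exception):
            print(f"task {idx} failed: {outcome}")
        else:
            print(outcome)


if __name__ == "__main__":
    asyncio.run(run_throttled())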
# Copyright 2015, Radware LTD. All rights reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import copy import netaddr import threading import time from neutron.api.v2 import attributes from neutron import context from neutron.i18n import _LE, _LW, _LI from neutron.plugins.common import constants from oslo_config import cfg from oslo_log import helpers as log_helpers from oslo_log import log as logging from oslo_utils import excutils from six.moves import queue as Queue import neutron_lbaas.common.cert_manager from neutron_lbaas.drivers.radware import base_v2_driver from neutron_lbaas.drivers.radware import exceptions as r_exc from neutron_lbaas.drivers.radware import rest_client as rest CERT_MANAGER_PLUGIN = neutron_lbaas.common.cert_manager.CERT_MANAGER_PLUGIN TEMPLATE_HEADER = {'Content-Type': 'application/vnd.com.radware.vdirect.' 'template-parameters+json'} PROVISION_HEADER = {'Content-Type': 'application/vnd.com.radware.' 'vdirect.status+json'} CREATE_SERVICE_HEADER = {'Content-Type': 'application/vnd.com.radware.' 'vdirect.adc-service-specification+json'} PROPERTY_DEFAULTS = {'type': 'none', 'cookie_name': 'none', 'url_path': '/', 'http_method': 'GET', 'expected_codes': '200', 'subnet': '255.255.255.255', 'mask': '255.255.255.255', 'gw': '255.255.255.255', } LOADBALANCER_PROPERTIES = ['vip_address', 'admin_state_up'] LISTENER_PROPERTIES = ['id', 'protocol_port', 'protocol', 'connection_limit', 'admin_state_up'] POOL_PROPERTIES = ['id', 'protocol', 'lb_algorithm', 'admin_state_up'] MEMBER_PROPERTIES = ['id', 'address', 'protocol_port', 'weight', 'admin_state_up', 'subnet', 'mask', 'gw'] SESSION_PERSISTENCY_PROPERTIES = ['type', 'cookie_name'] HEALTH_MONITOR_PROPERTIES = ['type', 'delay', 'timeout', 'max_retries', 'admin_state_up', 'url_path', 'http_method', 'expected_codes', 'id'] LOG = logging.getLogger(__name__) class RadwareLBaaSV2Driver(base_v2_driver.RadwareLBaaSBaseV2Driver): # # Assumptions: # 1) We have only one workflow that takes care of l2-l4 and service creation # 2) The workflow template exists on the vDirect server # 3) The workflow exposes one operation named 'update' (plus ctor and dtor) # 4) The 'update' operation gets the loadbalancer object graph as input # 5) The object graph is enhanced by our code before it is sent to the # workflow # 6) Async operations are handled by a different thread # def __init__(self, plugin): super(RadwareLBaaSV2Driver, self).__init__(plugin) rad = cfg.CONF.radwarev2 rad_debug = cfg.CONF.radwarev2_debug self.plugin = plugin self.service = { "name": "_REPLACE_", "tenantId": "_REPLACE_", "haPair": rad.service_ha_pair, "sessionMirroringEnabled": rad.service_session_mirroring_enabled, "primary": { "capacity": { "throughput": rad.service_throughput, "sslThroughput": rad.service_ssl_throughput, "compressionThroughput": rad.service_compression_throughput, "cache": rad.service_cache }, "network": { "type": "portgroup", "portgroups": '_REPLACE_' }, "adcType": rad.service_adc_type, "acceptableAdc": "Exact" } } if rad.service_resource_pool_ids: ids =
rad.service_resource_pool_ids self.service['resourcePoolIds'] = [ {'id': id} for id in ids ] else: self.service['resourcePoolIds'] = [] if rad.service_isl_vlan: self.service['islVlan'] = rad.service_isl_vlan self.workflow_template_name = rad.workflow_template_name self.child_workflow_template_names = rad.child_workflow_template_names self.workflow_params = rad.workflow_params self.workflow_action_name = rad.workflow_action_name self.stats_action_name = rad.stats_action_name vdirect_address = rad.vdirect_address sec_server = rad.ha_secondary_address self.rest_client = rest.vDirectRESTClient( server=vdirect_address, secondary_server=sec_server, user=rad.vdirect_user, password=rad.vdirect_password) self.workflow_params['provision_service'] = rad_debug.provision_service self.workflow_params['configure_l3'] = rad_debug.configure_l3 self.workflow_params['configure_l4'] = rad_debug.configure_l4 self.queue = Queue.Queue() self.completion_handler = OperationCompletionHandler(self.queue, self.rest_client, plugin) self.workflow_templates_exists = False self.completion_handler.setDaemon(True) self.completion_handler_started = False def _start_completion_handling_thread(self): if not self.completion_handler_started: LOG.info(_LI('Starting operation completion handling thread')) self.completion_handler.start() self.completion_handler_started = True @staticmethod def _get_wf_name(lb): return 'LB_' + lb.id @log_helpers.log_method_call def _verify_workflow_templates(self): """Verify the existence of workflow templates on the vDirect server.""" resource = '/api/workflowTemplate/' workflow_templates = {self.workflow_template_name: False} for child_wf_name in self.child_workflow_template_names: workflow_templates[child_wf_name] = False response = _rest_wrapper(self.rest_client.call('GET', resource, None, None), [200]) for workflow_template in workflow_templates.keys(): for template in response: if workflow_template == template['name']: workflow_templates[workflow_template] = True break for template, found in workflow_templates.items(): if not found: raise r_exc.WorkflowTemplateMissing( workflow_template=template) @log_helpers.log_method_call def workflow_exists(self, lb): """Check whether a workflow exists for the loadbalancer instance""" wf_name = self._get_wf_name(lb) wf_resource = '/api/workflow/%s' % (wf_name) try: _rest_wrapper(self.rest_client.call( 'GET', wf_resource, None, None), [200]) except Exception: return False return True @log_helpers.log_method_call def _create_workflow(self, lb, lb_network_id, proxy_network_id): """Create workflow for loadbalancer instance""" self._verify_workflow_templates() wf_name = self._get_wf_name(lb) service = copy.deepcopy(self.service) service['tenantId'] = lb.tenant_id service['name'] = 'srv_' + lb_network_id if lb_network_id != proxy_network_id: self.workflow_params["twoleg_enabled"] = True service['primary']['network']['portgroups'] = [ lb_network_id, proxy_network_id] else: self.workflow_params["twoleg_enabled"] = False service['primary']['network']['portgroups'] = [lb_network_id] tmpl_resource = '/api/workflowTemplate/%s?name=%s' % ( self.workflow_template_name, wf_name) _rest_wrapper(self.rest_client.call( 'POST', tmpl_resource, {'parameters': dict(self.workflow_params, service_params=service)}, TEMPLATE_HEADER)) @log_helpers.log_method_call def get_stats(self, ctx, lb): wf_name = self._get_wf_name(lb) resource = '/api/workflow/%s/action/%s' % ( wf_name, self.stats_action_name) response = _rest_wrapper(self.rest_client.call('POST', resource, None, TEMPLATE_HEADER), success_codes=[202])
LOG.debug('stats_action response: %s ', response) resource = '/api/workflow/%s/parameters' % (wf_name) response = _rest_wrapper(self.rest_client.call('GET', resource, None, TEMPLATE_HEADER), success_codes=[200]) LOG.debug('stats_values response: %s ', response) return response['stats'] @log_helpers.log_method_call def execute_workflow(self, ctx, manager, data_model, old_data_model=None, delete=False): lb = data_model.root_loadbalancer # Get possible proxy subnet. # Proxy subnet equals the LB subnet if no proxy # is necessary. # Get the subnet id of any member located on a network different from # the loadbalancer's network. If the returned subnet id is the subnet id # of the loadbalancer - all members are accessible from the loadbalancer's # network, meaning no second leg or static routes are required. # Otherwise, create a proxy port on the found member's subnet and get its # address as a proxy address for the loadbalancer instance lb_subnet = self.plugin.db._core_plugin.get_subnet( ctx, lb.vip_subnet_id) proxy_subnet = lb_subnet proxy_port_subnet_id = self._get_proxy_port_subnet_id(lb) if proxy_port_subnet_id == lb.vip_subnet_id: proxy_port_address = lb.vip_address else: proxy_port_address = self._create_proxy_port_and_get_address( ctx, lb, proxy_port_subnet_id) proxy_subnet = self.plugin.db._core_plugin.get_subnet( ctx, proxy_port_subnet_id) # Check if the workflow exists, create it if not if not self.workflow_exists(lb): self._create_workflow(lb, lb_subnet['network_id'], proxy_subnet['network_id']) # Build objects graph objects_graph = self._build_objects_graph(ctx, lb, data_model, proxy_port_address, proxy_subnet) LOG.debug("Radware vDirect LB object graph is " + str(objects_graph)) wf_name = self._get_wf_name(lb) resource = '/api/workflow/%s/action/%s' % ( wf_name, self.workflow_action_name) response = _rest_wrapper(self.rest_client.call('POST', resource, {'parameters': objects_graph}, TEMPLATE_HEADER), success_codes=[202]) LOG.debug('_update_workflow response: %s ', response) oper = OperationAttributes( manager, response['uri'], lb, data_model, old_data_model, delete=delete) LOG.debug('Pushing operation %s to the queue', oper) self._start_completion_handling_thread() self.queue.put_nowait(oper) def remove_workflow(self, ctx, manager, lb): wf_name = self._get_wf_name(lb) LOG.debug('Remove the workflow %s' % wf_name) resource = '/api/workflow/%s' % (wf_name) rest_return = self.rest_client.call('DELETE', resource, None, None) response = _rest_wrapper(rest_return, [204, 202, 404]) if rest_return[rest.RESP_STATUS] in [404]: try: self._delete_proxy_port(ctx, lb) LOG.debug('Proxy port for LB %s was deleted', lb.id) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Proxy port deletion for LB %s ' 'failed'), lb.id) manager.successful_completion(ctx, lb, delete=True) else: oper = OperationAttributes( manager, response['uri'], lb, lb, old_data_model=None, delete=True) self._start_completion_handling_thread() self.queue.put_nowait(oper) def _build_objects_graph(self, ctx, lb, data_model, proxy_port_address, proxy_subnet): """Iterate over the LB model starting from the root lb entity and build its JSON representation for vDirect """ graph = {} for prop in LOADBALANCER_PROPERTIES: graph[prop] = getattr(lb, prop, PROPERTY_DEFAULTS.get(prop)) graph['pip_address'] = proxy_port_address graph['listeners'] = [] listeners = [ listener for listener in lb.listeners if listener.provisioning_status != constants.PENDING_DELETE and (listener.default_pool and listener.default_pool.members)] for listener in listeners: listener_dict =
{} for prop in LISTENER_PROPERTIES: listener_dict[prop] = getattr( listener, prop, PROPERTY_DEFAULTS.get(prop)) if listener.default_tls_container_id: default_cert = CERT_MANAGER_PLUGIN.CertManager.get_cert( listener.default_tls_container_id, service_name='Neutron LBaaS v2 Radware provider') cert_dict = { 'id': listener.default_tls_container_id, 'certificate': default_cert.get_certificate(), 'intermediates': default_cert.get_intermediates(), 'private_key': default_cert.get_private_key(), 'passphrase': default_cert.get_private_key_passphrase()} listener_dict['default_tls_certificate'] = cert_dict if listener.sni_containers: listener_dict['sni_tls_certificates'] = [] for sni_container in listener.sni_containers: sni_cert = CERT_MANAGER_PLUGIN.CertManager.get_cert( sni_container.tls_container_id, service_name='Neutron LBaaS v2 Radware provider') listener_dict['sni_tls_certificates'].append( {'id': sni_container.tls_container_id, 'position': sni_container.position, 'certificate': sni_cert.get_certificate(), 'intermediates': sni_cert.get_intermediates(), 'private_key': sni_cert.get_private_key(), 'passphrase': sni_cert.get_private_key_passphrase()}) if listener.default_pool: pool_dict = {} for prop in POOL_PROPERTIES: pool_dict[prop] = getattr( listener.default_pool, prop, PROPERTY_DEFAULTS.get(prop)) if listener.default_pool.healthmonitor: hm_dict = {} for prop in HEALTH_MONITOR_PROPERTIES: hm_dict[prop] = getattr( listener.default_pool.healthmonitor, prop, PROPERTY_DEFAULTS.get(prop)) pool_dict['healthmonitor'] = hm_dict if listener.default_pool.sessionpersistence: sess_pers_dict = {} for prop in SESSION_PERSISTENCY_PROPERTIES: sess_pers_dict[prop] = getattr( listener.default_pool.sessionpersistence, prop, PROPERTY_DEFAULTS.get(prop)) pool_dict['sessionpersistence'] = sess_pers_dict pool_dict['members'] = [] members = [ member for member in listener.default_pool.members if member.provisioning_status != constants.PENDING_DELETE] for member in members: member_dict = {} for prop in MEMBER_PROPERTIES: member_dict[prop] = getattr( member, prop, PROPERTY_DEFAULTS.get(prop)) if (proxy_port_address != lb.vip_address and netaddr.IPAddress(member.address) not in netaddr.IPNetwork(proxy_subnet['cidr'])): self._accomplish_member_static_route_data( ctx, member, member_dict, proxy_subnet['gateway_ip']) pool_dict['members'].append(member_dict) listener_dict['default_pool'] = pool_dict graph['listeners'].append(listener_dict) return graph def _get_lb_proxy_port_name(self, lb): return 'proxy_' + lb.id def _get_proxy_port_subnet_id(self, lb): """Look for at least one member of any listener's pool that is located on subnet different than loabalancer's subnet. If such member found, return its subnet id. Otherwise, return loadbalancer's subnet id """ for listener in lb.listeners: if listener.default_pool: for member in listener.default_pool.members: if lb.vip_subnet_id != member.subnet_id: return member.subnet_id return lb.vip_subnet_id def _create_proxy_port_and_get_address(self, ctx, lb, proxy_port_subnet_id): """Check if proxy port was created earlier. If not, create a new port on proxy subnet and return its ip address. Returns port IP address """ proxy_port_name = self._get_lb_proxy_port_name(lb) ports = self.plugin.db._core_plugin.get_ports( ctx, filters={'name': [proxy_port_name], }) if not ports: # Create pip port. 
Use the subnet # determined earlier by the _get_proxy_port_subnet_id() function proxy_port_subnet = self.plugin.db._core_plugin.get_subnet( ctx, proxy_port_subnet_id) proxy_port_data = { 'tenant_id': lb.tenant_id, 'name': proxy_port_name, 'network_id': proxy_port_subnet['network_id'], 'mac_address': attributes.ATTR_NOT_SPECIFIED, 'admin_state_up': False, 'device_id': '', 'device_owner': 'neutron:' + constants.LOADBALANCERV2, 'fixed_ips': [{'subnet_id': proxy_port_subnet_id}] } proxy_port = self.plugin.db._core_plugin.create_port( ctx, {'port': proxy_port_data}) else: proxy_port = ports[0] ips_on_subnet = [ip for ip in proxy_port['fixed_ips'] if ip['subnet_id'] == proxy_port_subnet_id] if not ips_on_subnet: raise Exception(_('Could not find or allocate ' 'IP address on subnet id %s for proxy port') % proxy_port_subnet_id) else: return ips_on_subnet[0]['ip_address'] def _delete_proxy_port(self, ctx, lb): port_filter = { 'name': [self._get_lb_proxy_port_name(lb)], } ports = self.plugin.db._core_plugin.get_ports( ctx, filters=port_filter) if ports: for port in ports: try: self.plugin.db._core_plugin.delete_port( ctx, port['id']) except Exception as exception: # stop exception propagation, port may have # been deleted by other means LOG.warning(_LW('proxy port deletion failed: %r'), exception) def _accomplish_member_static_route_data(self, ctx, member, member_data, proxy_gateway_ip): member_ports = self.plugin.db._core_plugin.get_ports( ctx, filters={'fixed_ips': {'ip_address': [member.address]}, 'tenant_id': [member.tenant_id]}) if len(member_ports) == 1: member_subnet = self.plugin._core_plugin.get_subnet( ctx, member_ports[0]['fixed_ips'][0]['subnet_id']) member_network = netaddr.IPNetwork(member_subnet['cidr']) member_data['subnet'] = str(member_network.network) member_data['mask'] = str(member_network.netmask) else: member_data['subnet'] = member_data['address'] member_data['gw'] = proxy_gateway_ip class OperationCompletionHandler(threading.Thread): """Update DB with operation status or delete the entity from DB.""" def __init__(self, queue, rest_client, plugin): threading.Thread.__init__(self) self.queue = queue self.rest_client = rest_client self.plugin = plugin self.stoprequest = threading.Event() self.opers_to_handle_before_rest = 0 def join(self, timeout=None): self.stoprequest.set() super(OperationCompletionHandler, self).join(timeout) def handle_operation_completion(self, oper): result = self.rest_client.call('GET', oper.operation_url, None, None) LOG.debug('Operation completion requested %(uri)s and got: %(result)s', {'uri': oper.operation_url, 'result': result}) completed = result[rest.RESP_DATA]['complete'] reason = result[rest.RESP_REASON] description = result[rest.RESP_STR] if completed: # operation is done - update the DB with the status # or delete the entire graph from DB success = result[rest.RESP_DATA]['success'] sec_to_completion = time.time() - oper.creation_time debug_data = {'oper': oper, 'sec_to_completion': sec_to_completion, 'success': success} LOG.debug('Operation %(oper)s is completed after ' '%(sec_to_completion)d sec ' 'with success status: %(success)s :', debug_data) if not success: # failure - log it and set ERROR as the DB state if reason or description: msg = 'Reason:%s. Description:%s' % (reason, description) else: msg = "unknown" error_params = {"operation": oper, "msg": msg} LOG.error(_LE( 'Operation %(operation)s failed.
Reason: %(msg)s'), error_params) oper.status = constants.ERROR OperationCompletionHandler._run_post_failure_function(oper) else: oper.status = constants.ACTIVE OperationCompletionHandler._run_post_success_function(oper) return completed def run(self): while not self.stoprequest.isSet(): try: oper = self.queue.get(timeout=1) # Get the current queue size (N) and set the counter with it. # Handle N operations with no intermission. # Once N operations are handled, get the size again and repeat. if self.opers_to_handle_before_rest <= 0: self.opers_to_handle_before_rest = self.queue.qsize() + 1 LOG.debug('Operation consumed from the queue: ' + str(oper)) # check the status - if oper is done: update the db, # else push the oper again to the queue if not self.handle_operation_completion(oper): LOG.debug('Operation %s is not completed yet..' % oper) # Not completed - push to the queue again self.queue.put_nowait(oper) self.queue.task_done() self.opers_to_handle_before_rest -= 1 # Take a one second rest before starting to handle # new operations or operations handled before if self.opers_to_handle_before_rest <= 0: time.sleep(1) except Queue.Empty: continue except Exception: LOG.error(_LE( "Exception was thrown inside OperationCompletionHandler")) @staticmethod def _run_post_success_function(oper): try: ctx = context.get_admin_context(load_admin_roles=False) oper.manager.successful_completion(ctx, oper.data_model, delete=oper.delete) LOG.debug('Post-operation success function completed ' 'for operation %s', repr(oper)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Post-operation success function failed ' 'for operation %s'), repr(oper)) @staticmethod def _run_post_failure_function(oper): try: ctx = context.get_admin_context(load_admin_roles=False) oper.manager.failed_completion(ctx, oper.data_model) LOG.debug('Post-operation failure function completed ' 'for operation %s', repr(oper)) except Exception: with excutils.save_and_reraise_exception(): LOG.error(_LE('Post-operation failure function failed ' 'for operation %s'), repr(oper)) class OperationAttributes(object): """Holds operation attributes""" def __init__(self, manager, operation_url, lb, data_model=None, old_data_model=None, delete=False): self.manager = manager self.operation_url = operation_url self.lb = lb self.data_model = data_model self.old_data_model = old_data_model self.delete = delete self.creation_time = time.time() def __repr__(self): attrs = self.__dict__ items = ("%s = %r" % (k, v) for k, v in attrs.items()) return "<%s: {%s}>" % (self.__class__.__name__, ', '.join(items)) def _rest_wrapper(response, success_codes=None): """Wrap a REST call and make sure a valid status is returned.""" success_codes = success_codes or [202] if not response: raise r_exc.RESTRequestFailure( status=-1, reason="Unknown", description="Unknown", success_codes=success_codes ) elif response[rest.RESP_STATUS] not in success_codes: raise r_exc.RESTRequestFailure( status=response[rest.RESP_STATUS], reason=response[rest.RESP_REASON], description=response[rest.RESP_STR], success_codes=success_codes ) else: LOG.debug("this is a response: %s" % (response,)) return response[rest.RESP_DATA]
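# --- Hedged illustration (not part of the Radware driver above) ---
# _rest_wrapper() above accepts a response only when its HTTP status is in the
# allowed success codes and otherwise raises with the status, reason and body.
# The self-contained sketch below mirrors that pattern without importing the
# real rest_client module; FakeResponse, FakeRESTRequestFailure and the sample
# values are assumptions made only to keep the example runnable.
from collections import namedtuple

FakeResponse = namedtuple('FakeResponse', 'status reason body data')


class FakeRESTRequestFailure(Exception):
    """Stand-in for r_exc.RESTRequestFailure."""


def check_response(response, success_codes=(202,)):
    """Return the payload if the status is acceptable, otherwise raise."""
    if response is None:
        raise FakeRESTRequestFailure('no response received')
    if response.status not in success_codes:
        raise FakeRESTRequestFailure(
            'status=%s reason=%s body=%s expected=%s' %
            (response.status, response.reason, response.body, success_codes))
    return response.data


if __name__ == '__main__':
    ok = FakeResponse(202, 'Accepted', '', {'uri': '/api/workflow/LB_1'})
    print(check_response(ok))
    bad = FakeResponse(500, 'Server Error', 'boom', None)
    try:
        check_response(bad, success_codes=(200, 202))
    except FakeRESTRequestFailure as exc:
        print('rejected: %s' % exc)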
from decimal import * from dataactcore.models.validationModels import RuleSql from dataactvalidator.validation_handlers.validationError import ValidationError from dataactcore.interfaces.interfaceHolder import InterfaceHolder from dataactcore.utils.cloudLogger import CloudLogger class Validator(object): """ Checks individual records against specified validation tests """ BOOLEAN_VALUES = ["TRUE","FALSE","YES","NO","1","0"] tableAbbreviations = {"appropriations":"approp","award_financial_assistance":"afa","award_financial":"af","object_class_program_activity":"op","appropriation":"approp"} # Set of metadata fields that should not be directly validated META_FIELDS = ["row_number"] @classmethod def crossValidateSql(cls, rules, submissionId): """ Evaluate all sql-based rules for cross file validation Args: rules -- List of Rule objects submissionId -- ID of submission to run cross-file validation """ failures = [] # Put each rule through evaluate, appending all failures into list interfaces = InterfaceHolder() # Get short to long colname dictionary shortColnames = interfaces.validationDb.getShortToLongColname() for rule in rules: failedRows = interfaces.validationDb.connection.execute( rule.rule_sql.format(submissionId)) if failedRows.rowcount: # get list of fields involved in this validation # note: row_number is metadata, not a field being # validated, so exclude it cols = failedRows.keys() cols.remove('row_number') columnString = ", ".join(shortColnames[c] if c in shortColnames else c for c in cols) for row in failedRows: # get list of values for each column values = ["{}: {}".format(shortColnames[c], str(row[c])) if c in shortColnames else "{}: {}".format(c, str(row[c])) for c in cols] values = ", ".join(values) targetFileType = interfaces.validationDb.getFileTypeById(rule.target_file_id) failures.append([rule.file.name, targetFileType, columnString, str(rule.rule_error_message), values, row['row_number'],str(rule.rule_label),rule.file_id,rule.target_file_id,rule.rule_severity_id]) # Return list of cross file validation failures return failures @classmethod def validate(cls, record, csvSchema): """ Run initial set of single file validation: - check if required fields are present - check if data type matches data type specified in schema - check that field length matches field length specified in schema Args: record -- dict representation of a single record of data csvSchema -- dict of schema for the current file. 
Returns: Tuple of three values: True if validation passed, False if failed List of failed rules, each with field, description of failure, value that failed, rule label, and severity True if type check passed, False if type failed """ recordFailed = False recordTypeFailure = False failedRules = [] for fieldName in csvSchema: if (csvSchema[fieldName].required and not fieldName in record): return False, [[fieldName, ValidationError.requiredError, "", "", "fatal"]], False for fieldName in record : if fieldName in cls.META_FIELDS: # Skip fields that are not user submitted continue checkRequiredOnly = False currentSchema = csvSchema[fieldName] currentData = record[fieldName] if(currentData != None): currentData = currentData.strip() if(currentData == None or len(currentData) == 0): if(currentSchema.required ): # If empty and required return field name and error recordFailed = True failedRules.append([fieldName, ValidationError.requiredError, "", "", "fatal"]) continue else: # If field is empty and not required its valid checkRequiredOnly = True # Always check the type in the schema if(not checkRequiredOnly and not Validator.checkType(currentData,currentSchema.field_type.name) ) : recordTypeFailure = True recordFailed = True failedRules.append([fieldName, ValidationError.typeError, currentData,"", "fatal"]) # Don't check value rules if type failed continue # Check length based on schema if currentSchema.length is not None and currentData is not None and len(currentData.strip()) > currentSchema.length: # Length failure, add to failedRules recordFailed = True failedRules.append([fieldName, ValidationError.lengthError, currentData,"", "warning"]) return (not recordFailed), failedRules, (not recordTypeFailure) @staticmethod def checkType(data,datatype) : """ Determine whether data is of the correct type Args: data: Data to be checked datatype: Type to check against Returns: True if data is of specified type, False otherwise """ if datatype is None: # If no type specified, don't need to check anything return True if(data.strip() == ""): # An empty string matches all types return True if(datatype == "STRING") : return(len(data) > 0) if(datatype == "BOOLEAN") : if(data.upper() in Validator.BOOLEAN_VALUES) : return True return False if(datatype == "INT") : try: int(data) return True except: return False if(datatype == "DECIMAL") : try: Decimal(data) return True except: return False if(datatype == "LONG"): try: int(data) return True except: return False raise ValueError("".join(["Data Type Error, Type: ",datatype,", Value: ",data])) @staticmethod def padToLength(data,padLength): """ Pad data with leading zeros Args: data: string to be padded padLength: length of string after padding Returns: padded string of length padLength """ if data is None: # Convert None to empty string so it can be padded with zeros return data data = data.strip() if data == "": # Empty values treated as null return None if len(data) <= padLength: return data.zfill(padLength) else: raise ValueError("".join(["Value is too long: ",str(data)])) @classmethod def validateFileBySql(cls, submissionId, fileType, interfaces): """ Check all SQL rules Args: submissionId: submission to be checked fileType: file type being checked interfaces: database interface objects Returns: List of errors found, each element has: field names error message values in fields involved row number rule label source file id target file id severity id """ CloudLogger.logError("VALIDATOR_INFO: ", "Beginning SQL validation rules on submissionID: " + str(submissionId) 
+ " fileType: "+ fileType, "") # Pull all SQL rules for this file type fileId = interfaces.validationDb.getFileTypeIdByName(fileType) rules = interfaces.validationDb.session.query(RuleSql).filter(RuleSql.file_id == fileId).filter( RuleSql.rule_cross_file_flag == False).all() errors = [] # Get short to long colname dictionary shortColnames = interfaces.validationDb.getShortToLongColname() # For each rule, execute sql for rule for rule in rules: CloudLogger.logError("VALIDATOR_INFO: ", "Running query: "+str(rule.query_name)+" on submissionID: " + str(submissionId) + " fileType: "+ fileType, "") failures = interfaces.stagingDb.connection.execute(rule.rule_sql.format(submissionId)) if failures.rowcount: # Create column list (exclude row_number) cols = failures.keys() cols.remove("row_number") # Build error list for failure in failures: errorMsg = rule.rule_error_message row = failure["row_number"] # Create strings for fields and values valueList = ["{}: {}".format(shortColnames[field], str(failure[field])) if field in shortColnames else "{}: {}".format(field, str(failure[field])) for field in cols] valueString = ", ".join(valueList) fieldList = [shortColnames[field] if field in shortColnames else field for field in cols] fieldString = ", ".join(fieldList) errors.append([fieldString, errorMsg, valueString, row, rule.rule_label, fileId, rule.target_file_id, rule.rule_severity_id]) CloudLogger.logError("VALIDATOR_INFO: ", "Completed SQL validation rules on submissionID: " + str(submissionId) + " fileType: "+ fileType, "") return errors
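# --- Hedged usage sketch (not part of the validator module above) ---
# Validator.checkType() and Validator.padToLength() have no database
# dependencies, so they can be exercised directly. The sample values below
# are illustrative only.
if __name__ == "__main__":
    assert Validator.checkType("123", "INT")
    assert not Validator.checkType("12.5", "INT")
    assert Validator.checkType("12.5", "DECIMAL")
    assert Validator.checkType("YES", "BOOLEAN")
    assert Validator.checkType("", "INT")  # empty string matches every type
    assert Validator.padToLength("42", 5) == "00042"
    assert Validator.padToLength("   ", 5) is None  # blank treated as null
    print("Validator helper checks passed")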
#!/usr/bin/env python from kazoo.client import KazooClient from urllib2 import * import os import logging import json import argparse import time class LeaderCore(object): def __init__(self, collection, shard, shard_range, core_name, node_name, node_port, core_dir): self.collection = collection self.shard = shard self.shard_range = shard_range self.core_name = core_name self.node_name = node_name self.node_port = node_port self.core_dir = core_dir def __eq__(self, other): if isinstance(other, self.__class__): return (other.collection == self.collection) and (other.shard_range == self.shard_range) else: return False def __ne__(self, other): return not self.__eq__(other) def __str__(self): s1 = str(self.collection) + '\n' + str(self.shard) + '\n' + str(self.shard_range) + '\n' s2 = str(self.core_name) + '\n' + str(self.node_name) + '\n' + str(self.node_port) + '\n' + str(self.core_dir) s = s1 + s2 + '\n' return s class SolrCloudBackup: solr_api_preffix = '/solr/admin/cores?action=STATUS&wt=json&core=' zk_server_port = '127.0.0.1:2181' backup_dir = '/tmp' leader_core_list_server = [] leader_core_list_filesystem = [] clusterstate_json = {} def __init__(self, zk_server_port, backup_dir): self.zk_server_port = zk_server_port self.backup_dir = backup_dir def get_clusterstate_json(self): logging.basicConfig() zk = KazooClient(hosts=self.zk_server_port, read_only=True) zk.start() data, stat = zk.get('/clusterstate.json') self.clusterstate_json = json.loads(data.decode('utf-8')) zk.stop() zk.close() def get_cores_definitions_server(self): self.get_clusterstate_json() for collection in self.clusterstate_json: for shard in self.clusterstate_json[collection]['shards']: shard_obj = self.clusterstate_json[collection]['shards'][shard] for core in shard_obj['replicas']: core_obj = shard_obj['replicas'][core] if core_obj['state'] == 'active' and 'leader' in core_obj and core_obj['leader']: # get leader core node_name = core_obj['node_name'].split(":")[0] node_port = core_obj['node_name'].split(":")[1].split("_")[0] core_name = core_obj['core'] shard_range = shard_obj['range'] url_string = 'http://' + node_name + ':' + node_port + self.solr_api_preffix + core_name core_status_obj = json.load(urlopen(url_string)) core_dir = core_status_obj['status'][core_name]['instanceDir'] leader_core_obj = LeaderCore( collection=collection, shard=shard, shard_range=shard_range, core_name=core_name, node_name=node_name, node_port=node_port, core_dir=core_dir ) self.leader_core_list_server.append(leader_core_obj) return self.leader_core_list_server def get_cores_definitions_filesystem(self): latest_backup_dir = self.backup_dir + '/' + str(max(map(int, os.listdir(self.backup_dir)))) for collection_fs in os.listdir(latest_backup_dir): for shard_range_fs in os.listdir(latest_backup_dir + '/' + collection_fs): core_dir_fs = latest_backup_dir + '/' + collection_fs + '/' + shard_range_fs + '/' leader_core_obj = LeaderCore( collection=collection_fs, shard='empty_field', shard_range=shard_range_fs, core_name='empty_field', node_name='empty_field', node_port='empty_field', core_dir=core_dir_fs ) self.leader_core_list_filesystem.append(leader_core_obj) return self.leader_core_list_filesystem def compare_cores_structures(self, f_cores_list, s_cores_list): eq_cores = 0 structures_is_equal = False f_len = len(f_cores_list) s_len = len(s_cores_list) for f_core in f_cores_list: if f_core in s_cores_list: eq_cores += 1 if f_len != 0 and s_len != 0 and f_len == s_len and eq_cores == s_len: structures_is_equal = True else: print 'ERROR: 
Different cores number in backup and on server OR different sharding schema.' return structures_is_equal def backup_from_server_to_filesystem(self): s_cores_list = self.get_cores_definitions_server() ts = str(int(time.time())) exclude_list = '--exclude "tlog/" --exclude "data/replication.properties" --exclude "core.properties" --exclude "data/index/write.lock" ' options_list = '-arvv --delete-before ' for core in s_cores_list: bck_path = self.backup_dir + '/' + ts + '/' + core.collection + '/' + core.shard_range + '/' mkdir_cmd = 'mkdir -p ' + bck_path p = os.popen(mkdir_cmd, "r") while 1: line = p.readline() if not line: break print line rsync_cmd = 'rsync ' + options_list + exclude_list + core.node_name + ':' + core.core_dir + ' ' + bck_path for i in range(1, 5): # dirty loop, attempt to be consistent print '-***-' print core.node_name, core.core_dir, 'iteration', i print '-***-' p = os.popen(rsync_cmd, "r") while 1: line = p.readline() if not line: break print line return True def restore_from_filesystem_to_server(self): options_list = '-arvv --delete ' f_cores_list = self.get_cores_definitions_filesystem() s_cores_list = self.get_cores_definitions_server() restore_possible = self.compare_cores_structures(f_cores_list=f_cores_list, s_cores_list=s_cores_list) if restore_possible: print 'Doing restore...' for s_core in s_cores_list: for f_core in f_cores_list: if s_core == f_core: print 'Restoring ' + f_core.collection, f_core.shard_range, f_core.core_dir, s_core.collection, s_core.shard_range, s_core.core_dir rsync_cmd = 'rsync ' + options_list + '' + f_core.core_dir + 'data/' + ' ' + s_core.node_name + ':' + s_core.core_dir + 'data/' for i in range(1, 5): # dirty loop, attempt to be consistent print '-***-' print f_core.collection, f_core.shard_range, 'iteration', i print '-***-' p = os.popen(rsync_cmd, "r") while 1: line = p.readline() if not line: break print line else: print 'Impossible to restore backup!' return False if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('zk_server_port', help='zookeeper server and port, default is 127.0.0.1:2181') parser.add_argument('backup_dir', help='directory to store backup, default is /tmp') parser.add_argument('action', help='"backup" OR "restore"') args = parser.parse_args() clb = SolrCloudBackup(zk_server_port=args.zk_server_port, backup_dir=args.backup_dir) if args.action == 'backup': r = clb.backup_from_server_to_filesystem() if args.action == 'restore': r = clb.restore_from_filesystem_to_server()
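# --- Hedged usage notes (comments only; appended for illustration) ---
# The endpoints, paths and the module name "solr_backup.py" below are
# illustrative assumptions, not values taken from this script.
#
# Command line, mirroring the argparse definition above:
#   python solr_backup.py 127.0.0.1:2181 /var/backups/solr backup
#   python solr_backup.py 127.0.0.1:2181 /var/backups/solr restore
#
# Backups are written to <backup_dir>/<unix timestamp>/<collection>/<shard range>/,
# which is exactly the layout get_cores_definitions_filesystem() expects when
# restore_from_filesystem_to_server() runs.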
import datetime import uuid from django.conf import settings from django.core.exceptions import FieldError from django.db import utils from django.db.backends.base.operations import BaseDatabaseOperations from django.db.models import aggregates, fields from django.db.models.expressions import Col from django.utils import timezone from django.utils.dateparse import parse_date, parse_datetime, parse_time from django.utils.duration import duration_string class DatabaseOperations(BaseDatabaseOperations): cast_char_field_without_max_length = 'text' def bulk_batch_size(self, fields, objs): """ SQLite has a compile-time default (SQLITE_LIMIT_VARIABLE_NUMBER) of 999 variables per query. If there's only a single field to insert, the limit is 500 (SQLITE_MAX_COMPOUND_SELECT). """ if len(fields) == 1: return 500 elif len(fields) > 1: return self.connection.features.max_query_params // len(fields) else: return len(objs) def check_expression_support(self, expression): bad_fields = (fields.DateField, fields.DateTimeField, fields.TimeField) bad_aggregates = (aggregates.Sum, aggregates.Avg, aggregates.Variance, aggregates.StdDev) if isinstance(expression, bad_aggregates): for expr in expression.get_source_expressions(): try: output_field = expr.output_field except FieldError: # Not every subexpression has an output_field which is fine # to ignore. pass else: if isinstance(output_field, bad_fields): raise NotImplementedError( 'You cannot use Sum, Avg, StdDev, and Variance ' 'aggregations on date/time fields in sqlite3 ' 'since date/time is saved as text.' ) def date_extract_sql(self, lookup_type, field_name): """ Support EXTRACT with a user-defined function django_date_extract() that's registered in connect(). Use single quotes because this is a string and could otherwise cause a collision with a field name. """ return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name) def date_interval_sql(self, timedelta): return "'%s'" % duration_string(timedelta) def format_for_duration_arithmetic(self, sql): """Do nothing since formatting is handled in the custom function.""" return sql def date_trunc_sql(self, lookup_type, field_name): return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name) def time_trunc_sql(self, lookup_type, field_name): return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name) def _convert_tzname_to_sql(self, tzname): return "'%s'" % tzname if settings.USE_TZ else 'NULL' def datetime_cast_date_sql(self, field_name, tzname): return "django_datetime_cast_date(%s, %s)" % ( field_name, self._convert_tzname_to_sql(tzname), ) def datetime_cast_time_sql(self, field_name, tzname): return "django_datetime_cast_time(%s, %s)" % ( field_name, self._convert_tzname_to_sql(tzname), ) def datetime_extract_sql(self, lookup_type, field_name, tzname): return "django_datetime_extract('%s', %s, %s)" % ( lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), ) def datetime_trunc_sql(self, lookup_type, field_name, tzname): return "django_datetime_trunc('%s', %s, %s)" % ( lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname), ) def time_extract_sql(self, lookup_type, field_name): return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name) def pk_default_value(self): return "NULL" def _quote_params_for_last_executed_query(self, params): """ Only for last_executed_query! Don't use this to execute SQL queries! 
""" # This function is limited both by SQLITE_LIMIT_VARIABLE_NUMBER (the # number of parameters, default = 999) and SQLITE_MAX_COLUMN (the # number of return values, default = 2000). Since Python's sqlite3 # module doesn't expose the get_limit() C API, assume the default # limits are in effect and split the work in batches if needed. BATCH_SIZE = 999 if len(params) > BATCH_SIZE: results = () for index in range(0, len(params), BATCH_SIZE): chunk = params[index:index + BATCH_SIZE] results += self._quote_params_for_last_executed_query(chunk) return results sql = 'SELECT ' + ', '.join(['QUOTE(?)'] * len(params)) # Bypass Django's wrappers and use the underlying sqlite3 connection # to avoid logging this query - it would trigger infinite recursion. cursor = self.connection.connection.cursor() # Native sqlite3 cursors cannot be used as context managers. try: return cursor.execute(sql, params).fetchone() finally: cursor.close() def last_executed_query(self, cursor, sql, params): # Python substitutes parameters in Modules/_sqlite/cursor.c with: # pysqlite_statement_bind_parameters(self->statement, parameters, allow_8bit_chars); # Unfortunately there is no way to reach self->statement from Python, # so we quote and substitute parameters manually. if params: if isinstance(params, (list, tuple)): params = self._quote_params_for_last_executed_query(params) else: values = tuple(params.values()) values = self._quote_params_for_last_executed_query(values) params = dict(zip(params, values)) return sql % params # For consistency with SQLiteCursorWrapper.execute(), just return sql # when there are no parameters. See #13648 and #17158. else: return sql def quote_name(self, name): if name.startswith('"') and name.endswith('"'): return name # Quoting once is enough. return '"%s"' % name def no_limit_value(self): return -1 def sql_flush(self, style, tables, sequences, allow_cascade=False): sql = ['%s %s %s;' % ( style.SQL_KEYWORD('DELETE'), style.SQL_KEYWORD('FROM'), style.SQL_FIELD(self.quote_name(table)) ) for table in tables] # Note: No requirement for reset of auto-incremented indices (cf. other # sql_flush() implementations). Just return SQL at this point return sql def execute_sql_flush(self, using, sql_list): # To prevent possible violation of foreign key constraints, deactivate # constraints outside of the transaction created in super(). with self.connection.constraint_checks_disabled(): super().execute_sql_flush(using, sql_list) def adapt_datetimefield_value(self, value): if value is None: return None # Expression values are adapted by the database. if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): if settings.USE_TZ: value = timezone.make_naive(value, self.connection.timezone) else: raise ValueError("SQLite backend does not support timezone-aware datetimes when USE_TZ is False.") return str(value) def adapt_timefield_value(self, value): if value is None: return None # Expression values are adapted by the database. 
if hasattr(value, 'resolve_expression'): return value # SQLite doesn't support tz-aware datetimes if timezone.is_aware(value): raise ValueError("SQLite backend does not support timezone-aware times.") return str(value) def get_db_converters(self, expression): converters = super().get_db_converters(expression) internal_type = expression.output_field.get_internal_type() if internal_type == 'DateTimeField': converters.append(self.convert_datetimefield_value) elif internal_type == 'DateField': converters.append(self.convert_datefield_value) elif internal_type == 'TimeField': converters.append(self.convert_timefield_value) # Converter for Col is added with Database.register_converter() # in base.py. elif internal_type == 'DecimalField' and not isinstance(expression, Col): converters.append(self.convert_decimalfield_value) elif internal_type == 'UUIDField': converters.append(self.convert_uuidfield_value) elif internal_type in ('NullBooleanField', 'BooleanField'): converters.append(self.convert_booleanfield_value) return converters def convert_datetimefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.datetime): value = parse_datetime(value) if settings.USE_TZ and not timezone.is_aware(value): value = timezone.make_aware(value, self.connection.timezone) return value def convert_datefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.date): value = parse_date(value) return value def convert_timefield_value(self, value, expression, connection): if value is not None: if not isinstance(value, datetime.time): value = parse_time(value) return value def convert_decimalfield_value(self, value, expression, connection): if value is not None: value = expression.output_field.format_number(value) # Value is not converted to Decimal here as it will be converted # later in BaseExpression.convert_value(). return value def convert_uuidfield_value(self, value, expression, connection): if value is not None: value = uuid.UUID(value) return value def convert_booleanfield_value(self, value, expression, connection): return bool(value) if value in (1, 0) else value def bulk_insert_sql(self, fields, placeholder_rows): return " UNION ALL ".join( "SELECT %s" % ", ".join(row) for row in placeholder_rows ) def combine_expression(self, connector, sub_expressions): # SQLite doesn't have a power function, so we fake it with a # user-defined function django_power that's registered in connect(). if connector == '^': return 'django_power(%s)' % ','.join(sub_expressions) return super().combine_expression(connector, sub_expressions) def combine_duration_expression(self, connector, sub_expressions): if connector not in ['+', '-']: raise utils.DatabaseError('Invalid connector for timedelta: %s.' % connector) fn_params = ["'%s'" % connector] + sub_expressions if len(fn_params) > 3: raise ValueError('Too many params for timedelta operations.') return "django_format_dtdelta(%s)" % ', '.join(fn_params) def integer_field_range(self, internal_type): # SQLite doesn't enforce any integer constraints return (None, None) def subtract_temporals(self, internal_type, lhs, rhs): lhs_sql, lhs_params = lhs rhs_sql, rhs_params = rhs if internal_type == 'TimeField': return "django_time_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params return "django_timestamp_diff(%s, %s)" % (lhs_sql, rhs_sql), lhs_params + rhs_params
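# --- Hedged illustration (not part of the Django backend above) ---
# bulk_batch_size() above derives how many rows fit into one SQLite INSERT
# from SQLITE_LIMIT_VARIABLE_NUMBER (999 placeholders by default) and
# SQLITE_MAX_COMPOUND_SELECT (500). The standalone sketch below mirrors that
# arithmetic; max_query_params and the sample counts are illustrative
# assumptions, not values read from a real connection.
def sketch_bulk_batch_size(num_fields, num_objs, max_query_params=999):
    """Mirror the batching rules documented in bulk_batch_size() above."""
    if num_fields == 1:
        # Single-field inserts are capped by SQLITE_MAX_COMPOUND_SELECT.
        return 500
    elif num_fields > 1:
        # Each row consumes num_fields placeholders, so the row count per
        # statement is bounded by the variable limit divided by the fields.
        return max_query_params // num_fields
    return num_objs


if __name__ == "__main__":
    print(sketch_bulk_batch_size(1, 10000))   # 500
    print(sketch_bulk_batch_size(4, 10000))   # 999 // 4 == 249
    print(sketch_bulk_batch_size(0, 10000))   # falls back to the object count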
""" Script to verify all examples in the readme. Simply execute python test_readme_examples.py The tests in this file are currently not unittests! They do plot images. TODO move this to checks/ ? """ from __future__ import print_function, division import functools def main(): example_simple_training_setting() example_very_complex_augmentation_pipeline() example_augment_images_and_keypoints() example_augment_images_and_bounding_boxes() example_augment_images_and_polygons() example_augment_images_and_linestrings() example_augment_images_and_heatmaps() example_augment_images_and_segmentation_maps() example_visualize_augmented_images() example_visualize_augmented_non_image_data() example_using_augmenters_only_once() example_multicore_augmentation() example_probability_distributions_as_parameters() example_withchannels() example_hooks() def seeded(func): @functools.wraps(func) def wrapper(*args, **kwargs): import imgaug.random as iarandom iarandom.seed(0) func(*args, **kwargs) return wrapper @seeded def example_simple_training_setting(): print("Example: Simple Training Setting") import numpy as np import imgaug.augmenters as iaa def load_batch(batch_idx): # dummy function, implement this # Return a numpy array of shape (N, height, width, #channels) # or a list of (height, width, #channels) arrays (may have different image # sizes). # Images should be in RGB for colorspace augmentations. # (cv2.imread() returns BGR!) # Images should usually be in uint8 with values from 0-255. return np.zeros((128, 32, 32, 3), dtype=np.uint8) + (batch_idx % 255) def train_on_images(images): # dummy function, implement this pass # Pipeline: # (1) Crop images from each side by 1-16px, do not resize the results # images back to the input size. Keep them at the cropped size. # (2) Horizontally flip 50% of the images. # (3) Blur images using a gaussian kernel with sigma between 0.0 and 3.0. seq = iaa.Sequential([ iaa.Crop(px=(1, 16), keep_size=False), iaa.Fliplr(0.5), iaa.GaussianBlur(sigma=(0, 3.0)) ]) for batch_idx in range(100): images = load_batch(batch_idx) images_aug = seq(images=images) # done by the library train_on_images(images_aug) # ----- # Make sure that the example really does something if batch_idx == 0: assert not np.array_equal(images, images_aug) @seeded def example_very_complex_augmentation_pipeline(): print("Example: Very Complex Augmentation Pipeline") import numpy as np import imgaug as ia import imgaug.augmenters as iaa # random example images images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) # Sometimes(0.5, ...) applies the given augmenter in 50% of all cases, # e.g. Sometimes(0.5, GaussianBlur(0.3)) would blur roughly every second image. sometimes = lambda aug: iaa.Sometimes(0.5, aug) # Define our sequence of augmentation steps that will be applied to every image # All augmenters with per_channel=0.5 will sample one value _per image_ # in 50% of all cases. In all other cases they will sample new values # _per channel_. 
seq = iaa.Sequential( [ # apply the following augmenters to most images iaa.Fliplr(0.5), # horizontally flip 50% of all images iaa.Flipud(0.2), # vertically flip 20% of all images # crop images by -5% to 10% of their height/width sometimes(iaa.CropAndPad( percent=(-0.05, 0.1), pad_mode=ia.ALL, pad_cval=(0, 255) )), sometimes(iaa.Affine( scale={"x": (0.8, 1.2), "y": (0.8, 1.2)}, # scale images to 80-120% of their size, individually per axis translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)}, # translate by -20 to +20 percent (per axis) rotate=(-45, 45), # rotate by -45 to +45 degrees shear=(-16, 16), # shear by -16 to +16 degrees order=[0, 1], # use nearest neighbour or bilinear interpolation (fast) cval=(0, 255), # if mode is constant, use a cval between 0 and 255 mode=ia.ALL # use any of scikit-image's warping modes (see 2nd image from the top for examples) )), # execute 0 to 5 of the following (less important) augmenters per image # don't execute all of them, as that would often be way too strong iaa.SomeOf((0, 5), [ sometimes(iaa.Superpixels(p_replace=(0, 1.0), n_segments=(20, 200))), # convert images into their superpixel representation iaa.OneOf([ iaa.GaussianBlur((0, 3.0)), # blur images with a sigma between 0 and 3.0 iaa.AverageBlur(k=(2, 7)), # blur image using local means with kernel sizes between 2 and 7 iaa.MedianBlur(k=(3, 11)), # blur image using local medians with kernel sizes between 2 and 7 ]), iaa.Sharpen(alpha=(0, 1.0), lightness=(0.75, 1.5)), # sharpen images iaa.Emboss(alpha=(0, 1.0), strength=(0, 2.0)), # emboss images # search either for all edges or for directed edges, # blend the result with the original image using a blobby mask iaa.SimplexNoiseAlpha(iaa.OneOf([ iaa.EdgeDetect(alpha=(0.5, 1.0)), iaa.DirectedEdgeDetect(alpha=(0.5, 1.0), direction=(0.0, 1.0)), ])), iaa.AdditiveGaussianNoise(loc=0, scale=(0.0, 0.05*255), per_channel=0.5), # add gaussian noise to images iaa.OneOf([ iaa.Dropout((0.01, 0.1), per_channel=0.5), # randomly remove up to 10% of the pixels iaa.CoarseDropout((0.03, 0.15), size_percent=(0.02, 0.05), per_channel=0.2), ]), iaa.Invert(0.05, per_channel=True), # invert color channels iaa.Add((-10, 10), per_channel=0.5), # change brightness of images (by -10 to 10 of original value) iaa.AddToHueAndSaturation((-20, 20)), # change hue and saturation # either change the brightness of the whole image (sometimes # per channel) or change the brightness of subareas iaa.OneOf([ iaa.Multiply((0.5, 1.5), per_channel=0.5), iaa.FrequencyNoiseAlpha( exponent=(-4, 0), first=iaa.Multiply((0.5, 1.5), per_channel=True), second=iaa.LinearContrast((0.5, 2.0)) ) ]), iaa.LinearContrast((0.5, 2.0), per_channel=0.5), # improve or worsen the contrast iaa.Grayscale(alpha=(0.0, 1.0)), sometimes(iaa.ElasticTransformation(alpha=(0.5, 3.5), sigma=0.25)), # move pixels locally around (with random strengths) sometimes(iaa.PiecewiseAffine(scale=(0.01, 0.05))), # sometimes move parts of the image around sometimes(iaa.PerspectiveTransform(scale=(0.01, 0.1))) ], random_order=True ) ], random_order=True ) images_aug = seq(images=images) # ----- # Make sure that the example really does something assert not np.array_equal(images, images_aug) @seeded def example_augment_images_and_keypoints(): print("Example: Augment Images and Keypoints") import numpy as np import imgaug.augmenters as iaa images = np.zeros((2, 128, 128, 3), dtype=np.uint8) # two example images images[:, 64, 64, :] = 255 points = [ [(10.5, 20.5)], # points on first image [(50.5, 50.5), (60.5, 60.5), (70.5, 70.5)] # 
points on second image ] seq = iaa.Sequential([ iaa.AdditiveGaussianNoise(scale=0.05*255), iaa.Affine(translate_px={"x": (1, 5)}) ]) # augment keypoints and images images_aug, points_aug = seq(images=images, keypoints=points) print("Image 1 center", np.argmax(images_aug[0, 64, 64:64+6, 0])) print("Image 2 center", np.argmax(images_aug[1, 64, 64:64+6, 0])) print("Points 1", points_aug[0]) print("Points 2", points_aug[1]) @seeded def example_augment_images_and_bounding_boxes(): print("Example: Augment Images and Bounding Boxes") import numpy as np import imgaug as ia import imgaug.augmenters as iaa images = np.zeros((2, 128, 128, 3), dtype=np.uint8) # two example images images[:, 64, 64, :] = 255 bbs = [ [ia.BoundingBox(x1=10.5, y1=15.5, x2=30.5, y2=50.5)], [ia.BoundingBox(x1=10.5, y1=20.5, x2=50.5, y2=50.5), ia.BoundingBox(x1=40.5, y1=75.5, x2=70.5, y2=100.5)] ] seq = iaa.Sequential([ iaa.AdditiveGaussianNoise(scale=0.05*255), iaa.Affine(translate_px={"x": (1, 5)}) ]) images_aug, bbs_aug = seq(images=images, bounding_boxes=bbs) @seeded def example_augment_images_and_polygons(): print("Example: Augment Images and Polygons") import numpy as np import imgaug as ia import imgaug.augmenters as iaa images = np.zeros((2, 128, 128, 3), dtype=np.uint8) # two example images images[:, 64, 64, :] = 255 polygons = [ [ia.Polygon([(10.5, 10.5), (50.5, 10.5), (50.5, 50.5)])], [ia.Polygon([(0.0, 64.5), (64.5, 0.0), (128.0, 128.0), (64.5, 128.0)])] ] seq = iaa.Sequential([ iaa.AdditiveGaussianNoise(scale=0.05*255), iaa.Affine(translate_px={"x": (1, 5)}) ]) images_aug, polygons_aug = seq(images=images, polygons=polygons) @seeded def example_augment_images_and_linestrings(): print("Example: Augment Images and LineStrings") import numpy as np import imgaug as ia import imgaug.augmenters as iaa images = np.zeros((2, 128, 128, 3), dtype=np.uint8) # two example images images[:, 64, 64, :] = 255 ls = [ [ia.LineString([(10.5, 10.5), (50.5, 10.5), (50.5, 50.5)])], [ia.LineString([(0.0, 64.5), (64.5, 0.0), (128.0, 128.0), (64.5, 128.0), (128.0, 0.0)])] ] seq = iaa.Sequential([ iaa.AdditiveGaussianNoise(scale=0.05*255), iaa.Affine(translate_px={"x": (1, 5)}) ]) images_aug, ls_aug = seq(images=images, line_strings=ls) @seeded def example_augment_images_and_heatmaps(): print("Example: Augment Images and Heatmaps") import numpy as np import imgaug.augmenters as iaa # Standard scenario: You have N RGB-images and additionally 21 heatmaps per # image. You want to augment each image and its heatmaps identically. images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) heatmaps = np.random.random(size=(16, 64, 64, 1)).astype(np.float32) seq = iaa.Sequential([ iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)}), iaa.Crop(px=(0, 10)) ]) images_aug, heatmaps_aug = seq(images=images, heatmaps=heatmaps) @seeded def example_augment_images_and_segmentation_maps(): print("Example: Augment Images and Segmentation Maps") import numpy as np import imgaug.augmenters as iaa # Standard scenario: You have N=16 RGB-images and additionally one segmentation # map per image. You want to augment each image and its heatmaps identically. 
images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) segmaps = np.random.randint(0, 10, size=(16, 64, 64, 1), dtype=np.int32) seq = iaa.Sequential([ iaa.GaussianBlur((0, 3.0)), iaa.Affine(translate_px={"x": (-40, 40)}), iaa.Crop(px=(0, 10)) ]) images_aug, segmaps_aug = seq(images=images, segmentation_maps=segmaps) @seeded def example_visualize_augmented_images(): print("Example: Visualize Augmented Images") import numpy as np import imgaug.augmenters as iaa images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) seq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))]) # Show an image with 8*8 augmented versions of image 0 and 8*8 augmented # versions of image 1. Identical augmentations will be applied to # image 0 and 1. seq.show_grid([images[0], images[1]], cols=8, rows=8) @seeded def example_visualize_augmented_non_image_data(): print("Example: Visualize Augmented Non-Image Data") import numpy as np import imgaug as ia image = np.zeros((64, 64, 3), dtype=np.uint8) # points kps = [ia.Keypoint(x=10.5, y=20.5), ia.Keypoint(x=60.5, y=60.5)] kpsoi = ia.KeypointsOnImage(kps, shape=image.shape) image_with_kps = kpsoi.draw_on_image(image, size=7, color=(0, 0, 255)) ia.imshow(image_with_kps) # bbs bbsoi = ia.BoundingBoxesOnImage([ ia.BoundingBox(x1=10.5, y1=20.5, x2=50.5, y2=30.5) ], shape=image.shape) image_with_bbs = bbsoi.draw_on_image(image) image_with_bbs = ia.BoundingBox( x1=50.5, y1=10.5, x2=100.5, y2=16.5 ).draw_on_image(image_with_bbs, color=(255, 0, 0), size=3) ia.imshow(image_with_bbs) # polygons psoi = ia.PolygonsOnImage([ ia.Polygon([(10.5, 20.5), (50.5, 30.5), (10.5, 50.5)]) ], shape=image.shape) image_with_polys = psoi.draw_on_image( image, alpha_points=0, alpha_face=0.5, color_lines=(255, 0, 0)) ia.imshow(image_with_polys) # heatmaps # pick first result via [0] here, because one image per heatmap channel # is generated hms = ia.HeatmapsOnImage(np.random.random(size=(32, 32, 1)).astype(np.float32), shape=image.shape) image_with_hms = hms.draw_on_image(image)[0] ia.imshow(image_with_hms) @seeded def example_using_augmenters_only_once(): print("Example: Using Augmenters Only Once") from imgaug import augmenters as iaa import numpy as np images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) # always horizontally flip each input image images_aug = iaa.Fliplr(1.0)(images=images) # vertically flip each input image with 90% probability images_aug = iaa.Flipud(0.9)(images=images) # blur 50% of all images using a gaussian kernel with a sigma of 3.0 images_aug = iaa.Sometimes(0.5, iaa.GaussianBlur(3.0))(images=images) @seeded def example_multicore_augmentation(): print("Example: Multicore Augmentation") import skimage.data import imgaug as ia import imgaug.augmenters as iaa from imgaug.augmentables.batches import UnnormalizedBatch # Number of batches and batch size for this example nb_batches = 10 batch_size = 32 # Example augmentation sequence to run in the background augseq = iaa.Sequential([ iaa.Fliplr(0.5), iaa.CoarseDropout(p=0.1, size_percent=0.1) ]) # For simplicity, we use the same image here many times astronaut = skimage.data.astronaut() astronaut = ia.imresize_single_image(astronaut, (64, 64)) # Make batches out of the example image (here: 10 batches, each 32 times # the example image) batches = [] for _ in range(nb_batches): batches.append(UnnormalizedBatch(images=[astronaut] * batch_size)) # Show the augmented images. # Note that augment_batches() returns a generator. 
for images_aug in augseq.augment_batches(batches, background=True): ia.imshow(ia.draw_grid(images_aug.images_aug, cols=8)) @seeded def example_probability_distributions_as_parameters(): print("Example: Probability Distributions as Parameters") import numpy as np from imgaug import augmenters as iaa from imgaug import parameters as iap images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) # Blur by a value sigma which is sampled from a uniform distribution # of range 10.1 <= x < 13.0. # The convenience shortcut for this is: GaussianBlur((10.1, 13.0)) blurer = iaa.GaussianBlur(10 + iap.Uniform(0.1, 3.0)) images_aug = blurer(images=images) # Blur by a value sigma which is sampled from a gaussian distribution # N(1.0, 0.1), i.e. sample a value that is usually around 1.0. # Clip the resulting value so that it never gets below 0.1 or above 3.0. blurer = iaa.GaussianBlur(iap.Clip(iap.Normal(1.0, 0.1), 0.1, 3.0)) images_aug = blurer(images=images) @seeded def example_withchannels(): print("Example: WithChannels") import numpy as np import imgaug.augmenters as iaa # fake RGB images images = np.random.randint(0, 255, (16, 128, 128, 3), dtype=np.uint8) # add a random value from the range (-30, 30) to the first two channels of # input images (e.g. to the R and G channels) aug = iaa.WithChannels( channels=[0, 1], children=iaa.Add((-30, 30)) ) images_aug = aug(images=images) @seeded def example_hooks(): print("Example: Hooks") import numpy as np import imgaug as ia import imgaug.augmenters as iaa # Images and heatmaps, just arrays filled with value 30. # We define the heatmaps here as uint8 arrays as we are going to feed them # through the pipeline similar to normal images. In that way, every # augmenter is applied to them. images = np.full((16, 128, 128, 3), 30, dtype=np.uint8) heatmaps = np.full((16, 128, 128, 21), 30, dtype=np.uint8) # add vertical lines to see the effect of flip images[:, 16:128-16, 120:124, :] = 120 heatmaps[:, 16:128-16, 120:124, :] = 120 seq = iaa.Sequential([ iaa.Fliplr(0.5, name="Flipper"), iaa.GaussianBlur((0, 3.0), name="GaussianBlur"), iaa.Dropout(0.02, name="Dropout"), iaa.AdditiveGaussianNoise(scale=0.01*255, name="MyLittleNoise"), iaa.AdditiveGaussianNoise(loc=32, scale=0.0001*255, name="SomeOtherNoise"), iaa.Affine(translate_px={"x": (-40, 40)}, name="Affine") ]) # change the activated augmenters for heatmaps, # we only want to execute horizontal flip, affine transformation and one of # the gaussian noises def activator_heatmaps(images, augmenter, parents, default): if augmenter.name in ["GaussianBlur", "Dropout", "MyLittleNoise"]: return False else: # default value for all other augmenters return default hooks_heatmaps = ia.HooksImages(activator=activator_heatmaps) # call to_deterministic() once per batch, NOT only once at the start seq_det = seq.to_deterministic() images_aug = seq_det(images=images) heatmaps_aug = seq_det(images=heatmaps, hooks=hooks_heatmaps) # ----------- ia.show_grid(images_aug) ia.show_grid(heatmaps_aug[..., 0:3]) if __name__ == "__main__": main()
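# --- Editor's sketch (not part of the original examples file) ----------------
# The multicore example above drives augment_batches(background=True). Recent
# imgaug releases also expose an explicit worker pool via Augmenter.pool(); the
# function below is a minimal, hedged sketch of that variant. It is not called
# from main(), and the pool()/map_batches() signatures are assumptions based on
# the imgaug multicore documentation rather than code shown in this file.
def example_multicore_pool_sketch():
    import numpy as np
    import imgaug.augmenters as iaa
    from imgaug.augmentables.batches import UnnormalizedBatch

    images = np.zeros((4, 64, 64, 3), dtype=np.uint8)  # tiny dummy batch
    batches = [UnnormalizedBatch(images=list(images)) for _ in range(10)]
    augseq = iaa.Sequential([iaa.Fliplr(0.5), iaa.GaussianBlur((0, 3.0))])

    # processes=None lets imgaug/multiprocessing pick the worker count;
    # seed makes the workers reproducible across runs
    with augseq.pool(processes=None, seed=1) as pool:
        batches_aug = pool.map_batches(batches)
    print(len(batches_aug), "batches augmented")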
#!/usr/bin/env python
# Licensed to Cloudera, Inc. under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. Cloudera, Inc. licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import json
import logging
import math
import numbers
import uuid

from django.utils.html import escape

from desktop.lib.i18n import smart_unicode

from notebook.connectors.base import Notebook


LOG = logging.getLogger(__name__)


# Materialize and HTML escape results
def escape_rows(rows, nulls_only=False):
  data = []

  for row in rows:
    escaped_row = []
    for field in row:
      if isinstance(field, numbers.Number):
        if math.isnan(field) or math.isinf(field):
          escaped_field = json.dumps(field)
        else:
          escaped_field = field
      elif field is None:
        escaped_field = 'NULL'
      else:
        escaped_field = smart_unicode(field, errors='replace')  # Prevent error when getting back non utf8 like charset=iso-8859-1
        if not nulls_only:
          escaped_field = escape(escaped_field).replace(' ', '&nbsp;')
      escaped_row.append(escaped_field)
    data.append(escaped_row)

  return data


def make_notebook(name='Browse', description='', editor_type='hive', statement='', status='ready',
                  files=None, functions=None, settings=None, is_saved=False, database='default',
                  snippet_properties=None, batch_submit=False, on_success_url=None, skip_historify=False,
                  is_task=False, last_executed=-1, is_notebook=False):
  '''
  skip_historify: do not add the task to the query history. e.g. SQL Dashboard
  isManaged: true when the operation is managed by Hue (include_managed=True in document), e.g.
exporting query result, dropping some tables ''' from notebook.connectors.hiveserver2 import HS2Api editor = Notebook() if snippet_properties is None: snippet_properties = {} if editor_type == 'hive': sessions_properties = HS2Api.get_properties(editor_type) if files is not None: _update_property_value(sessions_properties, 'files', files) if functions is not None: _update_property_value(sessions_properties, 'functions', functions) if settings is not None: _update_property_value(sessions_properties, 'settings', settings) elif editor_type == 'impala': sessions_properties = HS2Api.get_properties(editor_type) if settings is not None: _update_property_value(sessions_properties, 'files', files) elif editor_type == 'java': sessions_properties = [] # Java options else: sessions_properties = [] data = { 'name': name, 'uuid': str(uuid.uuid4()), 'description': description, 'sessions': [ { 'type': editor_type, 'properties': sessions_properties, 'id': None } ], 'selectedSnippet': editor_type, 'type': 'notebook' if is_notebook else 'query-%s' % editor_type, 'showHistory': True, 'isSaved': is_saved, 'onSuccessUrl': on_success_url, 'skipHistorify': skip_historify, 'isManaged': is_task, 'snippets': [ { 'status': status, 'id': str(uuid.uuid4()), 'statement_raw': statement, 'statement': statement, 'type': editor_type, 'wasBatchExecuted': batch_submit, 'lastExecuted': last_executed, 'properties': { 'files': [] if files is None else files, 'functions': [] if functions is None else functions, 'settings': [] if settings is None else settings }, 'name': name, 'database': database, 'result': {'handle':{}}, 'variables': [] } ] if not is_notebook else [] } if snippet_properties: data['snippets'][0]['properties'].update(snippet_properties) editor.data = json.dumps(data) return editor def make_notebook2(name='Browse', description='', is_saved=False, snippets=None): from notebook.connectors.hiveserver2 import HS2Api editor = Notebook() _snippets = [] for snippet in snippets: default_properties = { 'files': [], 'functions': [], 'settings': [] } default_properties.update(snippet['properties']) snippet['properties'] = default_properties _snippets.append(snippet) data = { 'name': name, 'uuid': str(uuid.uuid4()), 'type': 'notebook', 'description': description, 'sessions': [ { 'type': _snippet['type'], 'properties': HS2Api.get_properties(snippet['type']), 'id': None } for _snippet in _snippets # Non unique types currently ], 'selectedSnippet': _snippets[0]['type'], 'showHistory': False, 'isSaved': is_saved, 'snippets': [ { 'status': _snippet.get('status', 'ready'), 'id': str(uuid.uuid4()), 'statement_raw': _snippet.get('statement', ''), 'statement': _snippet.get('statement', ''), 'type': _snippet.get('type'), 'properties': _snippet['properties'], 'name': name, 'database': _snippet.get('database'), 'result': {'handle':{}}, 'variables': [] } for _snippet in _snippets ] } editor.data = json.dumps(data) return editor def import_saved_beeswax_query(bquery): design = bquery.get_design() return make_notebook( name=bquery.name, description=bquery.desc, editor_type=_convert_type(bquery.type, bquery.data), statement=design.hql_query, status='ready', files=design.file_resources, functions=design.functions, settings=design.settings, is_saved=True, database=design.database ) def import_saved_pig_script(pig_script): snippet_properties = {} if pig_script.dict.get('hadoopProperties'): snippet_properties['hadoopProperties'] = [] for prop in pig_script.dict.get('hadoopProperties'): snippet_properties['hadoopProperties'].append("%s=%s" % 
(prop.get('name'), prop.get('value'))) if pig_script.dict.get('parameters'): snippet_properties['parameters'] = [] for param in pig_script.dict.get('parameters'): snippet_properties['parameters'].append("%s=%s" % (param.get('name'), param.get('value'))) if pig_script.dict.get('resources'): snippet_properties['resources'] = [] for resource in pig_script.dict.get('resources'): snippet_properties['resources'].append(resource.get('value')) notebook = make_notebook( name=pig_script.dict.get('name'), editor_type='pig', statement=pig_script.dict.get('script'), status='ready', snippet_properties=snippet_properties, is_saved=True ) # Remove files, functions, settings from snippet properties data = notebook.get_data() data['snippets'][0]['properties'].pop('files') data['snippets'][0]['properties'].pop('functions') data['snippets'][0]['properties'].pop('settings') notebook.data = json.dumps(data) return notebook def import_saved_mapreduce_job(wf): snippet_properties = {} node = wf.start.get_child('to') try: files = json.loads(node.files) for filepath in files: snippet_properties['files'].append({'type': 'file', 'path': filepath}) except ValueError, e: LOG.warn('Failed to parse files for mapreduce job design "%s".' % wf.name) snippet_properties['archives'] = [] try: archives = json.loads(node.archives) for filepath in archives: snippet_properties['archives'].append(filepath) except ValueError, e: LOG.warn('Failed to parse archives for mapreduce job design "%s".' % wf.name) snippet_properties['hadoopProperties'] = [] try: properties = json.loads(node.job_properties) if properties: for prop in properties: snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value'))) except ValueError, e: LOG.warn('Failed to parse job properties for mapreduce job design "%s".' % wf.name) snippet_properties['app_jar'] = node.jar_path notebook = make_notebook( name=wf.name, description=wf.description, editor_type='mapreduce', statement='', status='ready', snippet_properties=snippet_properties, is_saved=True ) # Remove functions, settings from snippet properties data = notebook.get_data() data['snippets'][0]['properties'].pop('functions') data['snippets'][0]['properties'].pop('settings') notebook.data = json.dumps(data) return notebook def import_saved_shell_job(wf): snippet_properties = {} node = wf.start.get_child('to') snippet_properties['command_path'] = node.command snippet_properties['arguments'] = [] snippet_properties['env_var'] = [] try: params = json.loads(node.params) if params: for param in params: if param['type'] == 'argument': snippet_properties['arguments'].append(param['value']) else: snippet_properties['env_var'].append(param['value']) except ValueError, e: LOG.warn('Failed to parse parameters for shell job design "%s".' % wf.name) snippet_properties['hadoopProperties'] = [] try: properties = json.loads(node.job_properties) if properties: for prop in properties: snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value'))) except ValueError, e: LOG.warn('Failed to parse job properties for shell job design "%s".' % wf.name) snippet_properties['files'] = [] try: files = json.loads(node.files) for filepath in files: snippet_properties['files'].append({'type': 'file', 'path': filepath}) except ValueError, e: LOG.warn('Failed to parse files for shell job design "%s".' 
% wf.name) snippet_properties['archives'] = [] try: archives = json.loads(node.archives) for archive in archives: snippet_properties['archives'].append(archive['name']) except ValueError, e: LOG.warn('Failed to parse archives for shell job design "%s".' % wf.name) snippet_properties['capture_output'] = node.capture_output notebook = make_notebook( name=wf.name, description=wf.description, editor_type='shell', statement='', status='ready', snippet_properties=snippet_properties, is_saved=True ) # Remove functions, settings from snippet properties data = notebook.get_data() data['snippets'][0]['properties'].pop('functions') data['snippets'][0]['properties'].pop('settings') notebook.data = json.dumps(data) return notebook def import_saved_java_job(wf): snippet_properties = {} node = wf.start.get_child('to') snippet_properties['app_jar'] = node.jar_path snippet_properties['class'] = node.main_class snippet_properties['args'] = node.args if node.args else '' snippet_properties['java_opts'] = node.java_opts if node.java_opts else '' snippet_properties['hadoopProperties'] = [] try: properties = json.loads(node.job_properties) if properties: for prop in properties: snippet_properties['hadoopProperties'].append("%s=%s" % (prop.get('name'), prop.get('value'))) except ValueError, e: LOG.warn('Failed to parse job properties for Java job design "%s".' % wf.name) snippet_properties['files'] = [] try: files = json.loads(node.files) for filepath in files: snippet_properties['files'].append({'type': 'file', 'path': filepath}) except ValueError, e: LOG.warn('Failed to parse files for Java job design "%s".' % wf.name) snippet_properties['archives'] = [] try: archives = json.loads(node.archives) for archive in archives: snippet_properties['archives'].append(archive['name']) except ValueError, e: LOG.warn('Failed to parse archives for Java job design "%s".' % wf.name) snippet_properties['capture_output'] = node.capture_output notebook = make_notebook( name=wf.name, description=wf.description, editor_type='java', statement='', status='ready', snippet_properties=snippet_properties, is_saved=True ) # Remove functions, settings from snippet properties data = notebook.get_data() data['snippets'][0]['properties'].pop('functions') data['snippets'][0]['properties'].pop('settings') notebook.data = json.dumps(data) return notebook def _convert_type(btype, bdata): from beeswax.models import HQL, IMPALA, RDBMS, SPARK if btype == HQL: return 'hive' elif btype == IMPALA: return 'impala' elif btype == RDBMS: data = json.loads(bdata) return data['query']['server'] elif btype == SPARK: # We should not import return 'spark' else: return 'hive' def _update_property_value(properties, key, value): """ Update property dict in list of properties where prop has "key": key, set "value": value """ for prop in properties: if prop['key'] == key: prop.update({'value': value})
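# --- Editor's sketch (not part of the original module) -----------------------
# Illustrates the list-of-dicts shape that _update_property_value() expects and
# how it mutates the matching entry in place. The property keys and the file
# path below are made-up illustrative values, not real Hue defaults.
def _example_update_property_value():
  example_properties = [
    {'key': 'files', 'value': []},
    {'key': 'functions', 'value': []},
    {'key': 'settings', 'value': []},
  ]
  _update_property_value(example_properties, 'files', [{'type': 'file', 'path': '/tmp/example-udf.jar'}])
  assert example_properties[0]['value'][0]['path'] == '/tmp/example-udf.jar'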
from tapiriik.database import db, cachedb, redis
from tapiriik.messagequeue import mq
from tapiriik.services import Service, ServiceRecord, APIExcludeActivity, ServiceException, ServiceExceptionScope, ServiceWarning, UserException, UserExceptionType
from tapiriik.settings import USER_SYNC_LOGS, DISABLED_SERVICES, WITHDRAWN_SERVICES
from .activity_record import ActivityRecord, ActivityServicePrescence
from datetime import datetime, timedelta
from pymongo.read_preferences import ReadPreference
import sys
import os
import socket
import traceback
import pprint
import copy
import random
import logging
import logging.handlers
import pytz
import kombu
import json

# Set this up separate from the logger used in this scope, so services' logging messages are caught and logged into users' files.
_global_logger = logging.getLogger("tapiriik")
_global_logger.setLevel(logging.DEBUG)
logging_console_handler = logging.StreamHandler(sys.stdout)
logging_console_handler.setLevel(logging.DEBUG)
logging_console_handler.setFormatter(logging.Formatter('%(message)s'))
_global_logger.addHandler(logging_console_handler)

logger = logging.getLogger("tapiriik.sync.worker")


def _formatExc():
    try:
        exc_type, exc_value, exc_traceback = sys.exc_info()
        # Walk to the innermost frame so its locals can be included in the report
        tb = exc_traceback
        while tb.tb_next:
            tb = tb.tb_next
        frame = tb.tb_frame
        locals_trimmed = []
        for local_name, local_val in frame.f_locals.items():
            value_full = pprint.pformat(local_val)
            if len(value_full) > 1000:
                value_full = value_full[:500] + "..." + value_full[-500:]
            locals_trimmed.append(str(local_name) + "=" + value_full)
        exc = '\n'.join(traceback.format_exception(exc_type, exc_value, exc_traceback)) + "\nLOCALS:\n" + '\n'.join(locals_trimmed)
        logger.exception("Service exception")
        return exc
    finally:
        del exc_traceback, exc_value, exc_type


def _isWarning(exc):
    return issubclass(exc.__class__, ServiceWarning)


# It's practically an ORM!
def _packServiceException(step, e):
    res = {"Step": step, "Message": e.Message + "\n" + _formatExc(), "Block": e.Block, "Scope": e.Scope, "TriggerExhaustive": e.TriggerExhaustive, "Timestamp": datetime.utcnow()}
    if e.UserException:
        res["UserException"] = _packUserException(e.UserException)
    return res


def _packUserException(userException):
    if userException:
        return {"Type": userException.Type, "Extra": userException.Extra, "InterventionRequired": userException.InterventionRequired, "ClearGroup": userException.ClearGroup}


def _unpackUserException(raw):
    if not raw:
        return None
    if "UserException" in raw:
        raw = raw["UserException"]
    if not raw:
        return None
    if "Type" not in raw:
        return None
    return UserException(raw["Type"], extra=raw["Extra"], intervention_required=raw["InterventionRequired"], clear_group=raw["ClearGroup"])


class Sync:
    SyncInterval = timedelta(hours=1)
    SyncIntervalJitter = timedelta(minutes=5)
    MinimumSyncInterval = timedelta(seconds=30)
    MaximumIntervalBeforeExhaustiveSync = timedelta(days=14)  # Based on the general page size of 50 activities, this would be >3/day...
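    # These attributes drive scheduling: when auto-sync applies, _consumeSyncTask()
    # below queues the next run roughly SyncInterval +/- SyncIntervalJitter ahead,
    # and MaximumIntervalBeforeExhaustiveSync presumably bounds how long an account
    # may go between syncs before a first-page-only listing can no longer be trusted
    # to cover everything new.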
def ScheduleImmediateSync(user, exhaustive=None): if exhaustive is None: db.users.update({"_id": user["_id"]}, {"$set": {"NextSynchronization": datetime.utcnow()}}) else: db.users.update({"_id": user["_id"]}, {"$set": {"NextSynchronization": datetime.utcnow(), "NextSyncIsExhaustive": exhaustive}}) def SetNextSyncIsExhaustive(user, exhaustive=False): db.users.update({"_id": user["_id"]}, {"$set": {"NextSyncIsExhaustive": exhaustive}}) def InitializeWorkerBindings(): Sync._channel = mq.channel() Sync._exchange = kombu.Exchange("tapiriik-users", type="direct")(Sync._channel) Sync._exchange.declare() Sync._global_queue = kombu.Queue("tapiriik-users")(Sync._channel) Sync._host_queue = kombu.Queue("tapiriik-users-%s" % socket.gethostname())(Sync._channel) Sync._global_queue.declare() Sync._host_queue.declare() # Bind to worker-specific and general routing keys Sync._global_queue.bind_to(exchange="tapiriik-users", routing_key="") Sync._host_queue.bind_to(exchange="tapiriik-users", routing_key=socket.gethostname()) def PerformGlobalSync(heartbeat_callback=None, version=None, max_users=None): def _callback(body, message): Sync._consumeSyncTask(body, message, heartbeat_callback, version) Sync._consumer = kombu.Consumer( channel=Sync._channel, queues=[Sync._host_queue, Sync._global_queue], callbacks=[_callback], auto_declare=False ) Sync._consumer.qos(prefetch_count=1, apply_global=False) Sync._consumer.consume() for _ in kombu.eventloop(mq, limit=max_users): pass def _consumeSyncTask(body, message, heartbeat_callback_direct, version): from tapiriik.auth import User user_id = body["user_id"] user = User.Get(user_id) if user is None: logger.warning("Could not find user %s - bailing" % user_id) message.ack() # Otherwise the entire thing grinds to a halt return if body["generation"] != user.get("QueuedGeneration", None): # QueuedGeneration being different means they've gone through sync_scheduler since this particular message was queued # So, discard this and wait for that message to surface # Should only happen when I manually requeue people logger.warning("Queue generation mismatch for %s - bailing" % user_id) message.ack() return def heartbeat_callback(state): heartbeat_callback_direct(state, user_id) syncStart = datetime.utcnow() # Always to an exhaustive sync if there were errors # Sometimes services report that uploads failed even when they succeeded. # If a partial sync was done, we'd be assuming that the accounts were consistent past the first page # e.g. If an activity failed to upload far in the past, it would never be attempted again. # So we need to verify the full state of the accounts. # But, we can still do a partial sync if there are *only* blocking errors # In these cases, the block will protect that service from being improperly manipulated (though tbqh I can't come up with a situation where this would happen, it's more of a performance thing). # And, when the block is cleared, NextSyncIsExhaustive is set. 
exhaustive = "NextSyncIsExhaustive" in user and user["NextSyncIsExhaustive"] is True if ("ForcingExhaustiveSyncErrorCount" not in user and "NonblockingSyncErrorCount" in user and user["NonblockingSyncErrorCount"] > 0) or \ ("ForcingExhaustiveSyncErrorCount" in user and user["ForcingExhaustiveSyncErrorCount"] > 0): exhaustive = True result = None try: result = Sync.PerformUserSync(user, exhaustive, heartbeat_callback=heartbeat_callback) finally: nextSync = None if User.HasActivePayment(user): if User.GetConfiguration(user)["suppress_auto_sync"]: logger.info("Not scheduling auto sync for paid user") else: nextSync = datetime.utcnow() + Sync.SyncInterval + timedelta(seconds=random.randint(-Sync.SyncIntervalJitter.total_seconds(), Sync.SyncIntervalJitter.total_seconds())) if result: if result.ForceNextSync: logger.info("Forcing next sync at %s" % result.ForceNextSync) nextSync = result.ForceNextSync scheduling_result = db.users.update( { "_id": user["_id"] }, { "$set": { "NextSynchronization": nextSync, "LastSynchronization": datetime.utcnow(), "LastSynchronizationVersion": version }, "$unset": { "NextSyncIsExhaustive": None, "QueuedAt": None # Set by sync_scheduler when the record enters the MQ } }) reschedule_confirm_message = "User reschedule for %s returned %s" % (nextSync, scheduling_result) # Tack this on the end of the log file since otherwise it's lost for good (blegh, but nicer than moving logging out of the sync task?) user_log = open(USER_SYNC_LOGS + str(user["_id"]) + ".log", "a+") user_log.write("\n%s\n" % reschedule_confirm_message) user_log.close() logger.debug(reschedule_confirm_message) syncTime = (datetime.utcnow() - syncStart).total_seconds() db.sync_worker_stats.insert({"Timestamp": datetime.utcnow(), "Worker": os.getpid(), "Host": socket.gethostname(), "TimeTaken": syncTime}) message.ack() def PerformUserSync(user, exhaustive=False, heartbeat_callback=None): return SynchronizationTask(user).Run(exhaustive=exhaustive, heartbeat_callback=heartbeat_callback) class SynchronizationTask: _logFormat = '[%(levelname)-8s] %(asctime)s (%(name)s:%(lineno)d) %(message)s' _logDateFormat = '%Y-%m-%d %H:%M:%S' def __init__(self, user): self.user = user def _lockUser(self): db.users.update({"_id": self.user["_id"]}, {"$set": {"SynchronizationWorker": os.getpid(), "SynchronizationHost": socket.gethostname(), "SynchronizationStartTime": datetime.utcnow()}}) def _unlockUser(self): unlock_result = db.users.update( { "_id": self.user["_id"] }, { "$unset": { "SynchronizationWorker": None } }) logger.debug("User unlock returned %s" % unlock_result) def _loadServiceData(self): self._connectedServiceIds = [x["ID"] for x in self.user["ConnectedServices"]] self._serviceConnections = [ServiceRecord(x) for x in db.connections.find({"_id": {"$in": self._connectedServiceIds}})] def _updateSyncProgress(self, step, progress): db.users.update({"_id": self.user["_id"]}, {"$set": {"SynchronizationProgress": progress, "SynchronizationStep": step}}) def _initializeUserLogging(self): self._logging_file_handler = logging.handlers.RotatingFileHandler(USER_SYNC_LOGS + str(self.user["_id"]) + ".log", maxBytes=0, backupCount=5, encoding="utf-8") self._logging_file_handler.setFormatter(logging.Formatter(self._logFormat, self._logDateFormat)) self._logging_file_handler.doRollover() _global_logger.addHandler(self._logging_file_handler) def _closeUserLogging(self): _global_logger.removeHandler(self._logging_file_handler) self._logging_file_handler.flush() self._logging_file_handler.close() def 
_loadExtendedAuthData(self): self._extendedAuthDetails = list(cachedb.extendedAuthDetails.find({"ID": {"$in": self._connectedServiceIds}})) def _destroyExtendedAuthData(self): cachedb.extendedAuthDetails.remove({"ID": {"$in": self._connectedServiceIds}}) def _initializePersistedSyncErrorsAndExclusions(self): self._syncErrors = {} self._hasTransientSyncErrors = {} self._syncExclusions = {} for conn in self._serviceConnections: if hasattr(conn, "SyncErrors"): # Remove non-blocking errors self._syncErrors[conn._id] = [x for x in conn.SyncErrors if "Block" in x and x["Block"]] self._hasTransientSyncErrors[conn._id] = len(self._syncErrors[conn._id]) != len(conn.SyncErrors) del conn.SyncErrors else: self._syncErrors[conn._id] = [] # Remove temporary exclusions (live tracking etc). self._syncExclusions[conn._id] = dict((k, v) for k, v in (conn.ExcludedActivities if conn.ExcludedActivities else {}).items() if v["Permanent"]) if conn.ExcludedActivities: del conn.ExcludedActivities # Otherwise the exception messages get really, really, really huge and break mongodb. def _writeBackSyncErrorsAndExclusions(self): nonblockingSyncErrorsCount = 0 forcingExhaustiveSyncErrorsCount = 0 blockingSyncErrorsCount = 0 syncExclusionCount = 0 for conn in self._serviceConnections: update_values = { "$set": { "SyncErrors": self._syncErrors[conn._id], "ExcludedActivities": self._syncExclusions[conn._id] } } if not self._isServiceExcluded(conn) and not self._shouldPersistServiceTrigger(conn): # Only reset the trigger if we succesfully got through the entire sync without bailing on this particular connection update_values["$unset"] = {"TriggerPartialSync": None} db.connections.update({"_id": conn._id}, update_values) nonblockingSyncErrorsCount += len([x for x in self._syncErrors[conn._id] if "Block" not in x or not x["Block"]]) blockingSyncErrorsCount += len([x for x in self._syncErrors[conn._id] if "Block" in x and x["Block"]]) forcingExhaustiveSyncErrorsCount += len([x for x in self._syncErrors[conn._id] if "Block" in x and x["Block"] and "TriggerExhaustive" in x and x["TriggerExhaustive"]]) syncExclusionCount += len(self._syncExclusions[conn._id].items()) db.users.update({"_id": self.user["_id"]}, {"$set": {"NonblockingSyncErrorCount": nonblockingSyncErrorsCount, "BlockingSyncErrorCount": blockingSyncErrorsCount, "ForcingExhaustiveSyncErrorCount": forcingExhaustiveSyncErrorsCount, "SyncExclusionCount": syncExclusionCount}}) def _writeBackActivityRecords(self): def _activityPrescences(prescences): return dict([(svcId if svcId else "", { "Processed": presc.ProcessedTimestamp, "Synchronized": presc.SynchronizedTimestamp, "Exception": _packUserException(presc.UserException) }) for svcId, presc in prescences.items()]) self._activityRecords.sort(key=lambda x: x.StartTime.replace(tzinfo=None), reverse=True) composed_records = [ { "StartTime": x.StartTime, "EndTime": x.EndTime, "Type": x.Type, "Name": x.Name, "Notes": x.Notes, "Private": x.Private, "Stationary": x.Stationary, "Distance": x.Distance, "UIDs": list(x.UIDs), "Prescence": _activityPrescences(x.PresentOnServices), "Abscence": _activityPrescences(x.NotPresentOnServices), "FailureCounts": x.FailureCounts } for x in self._activityRecords ] db.activity_records.update( {"UserID": self.user["_id"]}, { "$set": { "UserID": self.user["_id"], "Activities": composed_records } }, upsert=True ) def _initializeActivityRecords(self): raw_records = db.activity_records.find_one({"UserID": self.user["_id"]}) self._activityRecords = [] if not raw_records: return else: raw_records = 
raw_records["Activities"] for raw_record in raw_records: if "UIDs" not in raw_record: continue # From the few days where this was rolled out without this key... rec = ActivityRecord(raw_record) rec.UIDs = set(rec.UIDs) # Did I mention I should really start using an ORM-type deal any day now? for svc, absent in rec.Abscence.items(): rec.NotPresentOnServices[svc] = ActivityServicePrescence(absent["Processed"], absent["Synchronized"], _unpackUserException(absent["Exception"])) for svc, present in rec.Prescence.items(): rec.PresentOnServices[svc] = ActivityServicePrescence(present["Processed"], present["Synchronized"], _unpackUserException(present["Exception"])) del rec.Prescence del rec.Abscence rec.Touched = False self._activityRecords.append(rec) def _findOrCreateActivityRecord(self, activity): for record in self._activityRecords: if record.UIDs & activity.UIDs: record.Touched = True return record record = ActivityRecord.FromActivity(activity) record.Touched = True self._activityRecords.append(record) return record def _dropUntouchedActivityRecords(self): self._activityRecords[:] = [x for x in self._activityRecords if x.Touched] def _persistServiceTrigger(self, serviceRecord): self._persistTriggerServices[serviceRecord._id] = True def _shouldPersistServiceTrigger(self, serviceRecord): return serviceRecord._id in self._persistTriggerServices def _excludeService(self, serviceRecord, userException): self._excludedServices[serviceRecord._id] = userException if userException else None def _isServiceExcluded(self, serviceRecord): return serviceRecord._id in self._excludedServices def _getServiceExclusionUserException(self, serviceRecord): return self._excludedServices[serviceRecord._id] def _determineRecipientServices(self, activity): recipientServices = [] for conn in self._serviceConnections: if not conn.Service.ReceivesActivities: # Nope. continue if conn._id in activity.ServiceDataCollection: # The activity record is updated earlier for these, blegh. 
continue elif hasattr(conn, "SynchronizedActivities") and len([x for x in activity.UIDs if x in conn.SynchronizedActivities]): continue elif activity.Type not in conn.Service.SupportedActivities: logger.debug("\t...%s doesn't support type %s" % (conn.Service.ID, activity.Type)) activity.Record.MarkAsNotPresentOn(conn, UserException(UserExceptionType.TypeUnsupported)) else: recipientServices.append(conn) return recipientServices def _coalesceDatetime(self, a, b, knownTz=None): """ Returns the most informative (TZ-wise) datetime of those provided - defaulting to the first if they are equivalently descriptive """ if not b: if knownTz and a and not a.tzinfo: return a.replace(tzinfo=knownTz) return a if not a: if knownTz and b and not b.tzinfo: return b.replace(tzinfo=knownTz) return b if a.tzinfo and not b.tzinfo: return a elif b.tzinfo and not a.tzinfo: return b else: if knownTz and not a.tzinfo: return a.replace(tzinfo=knownTz) return a def _accumulateActivities(self, conn, svcActivities, no_add=False): # Yep, abs() works on timedeltas activityStartLeeway = timedelta(minutes=3) activityStartTZOffsetLeeway = timedelta(seconds=10) timezoneErrorPeriod = timedelta(hours=38) from tapiriik.services.interchange import ActivityType for act in svcActivities: act.UIDs = set([act.UID]) if not hasattr(act, "ServiceDataCollection"): act.ServiceDataCollection = {} if hasattr(act, "ServiceData") and act.ServiceData is not None: act.ServiceDataCollection[conn._id] = act.ServiceData del act.ServiceData if act.TZ and not hasattr(act.TZ, "localize"): raise ValueError("Got activity with TZ type " + str(type(act.TZ)) + " instead of a pytz timezone") # Used to ensureTZ() right here - doubt it's needed any more? existElsewhere = [ x for x in self._activities if ( # Identical x.UID == act.UID or # Check to see if the self._activities are reasonably close together to be considered duplicate (x.StartTime is not None and act.StartTime is not None and (act.StartTime.tzinfo is not None) == (x.StartTime.tzinfo is not None) and abs(act.StartTime-x.StartTime) < activityStartLeeway ) or # Try comparing the time as if it were TZ-aware and in the expected TZ (this won't actually change the value of the times being compared) (x.StartTime is not None and act.StartTime is not None and (act.StartTime.tzinfo is not None) != (x.StartTime.tzinfo is not None) and abs(act.StartTime.replace(tzinfo=None)-x.StartTime.replace(tzinfo=None)) < activityStartLeeway ) or # Sometimes wacky stuff happens and we get two activities with the same mm:ss but different hh, because of a TZ issue somewhere along the line. # So, we check for any activities +/- 14, wait, 38 hours that have the same minutes and seconds values. # (14 hours because Kiribati, and later, 38 hours because of some really terrible import code that existed on a service that shall not be named). # There's a very low chance that two activities in this period would intersect and be merged together. # But, given the fact that most users have maybe 0.05 activities per this period, it's an acceptable tradeoff. 
(x.StartTime is not None and act.StartTime is not None and abs(act.StartTime.replace(tzinfo=None)-x.StartTime.replace(tzinfo=None)) < timezoneErrorPeriod and abs(act.StartTime.replace(tzinfo=None).replace(hour=0) - x.StartTime.replace(tzinfo=None).replace(hour=0)) < activityStartTZOffsetLeeway ) or # Similarly, for half-hour time zones (there are a handful of quarter-hour ones, but I've got to draw a line somewhere, even if I revise it several times) (x.StartTime is not None and act.StartTime is not None and abs(act.StartTime.replace(tzinfo=None)-x.StartTime.replace(tzinfo=None)) < timezoneErrorPeriod and abs(act.StartTime.replace(tzinfo=None).replace(hour=0) - x.StartTime.replace(tzinfo=None).replace(hour=0)) > timedelta(minutes=30) - (activityStartTZOffsetLeeway / 2) and abs(act.StartTime.replace(tzinfo=None).replace(hour=0) - x.StartTime.replace(tzinfo=None).replace(hour=0)) < timedelta(minutes=30) + (activityStartTZOffsetLeeway / 2) ) ) and # Prevents closely-spaced activities of known different type from being lumped together - esp. important for manually-enetered ones (x.Type == ActivityType.Other or act.Type == ActivityType.Other or x.Type == act.Type or ActivityType.AreVariants([act.Type, x.Type])) ] if len(existElsewhere) > 0: existingActivity = existElsewhere[0] # we don't merge the exclude values here, since at this stage the services have the option of just not returning those activities if act.TZ is not None and existingActivity.TZ is None: existingActivity.TZ = act.TZ existingActivity.DefineTZ() existingActivity.FallbackTZ = existingActivity.FallbackTZ if existingActivity.FallbackTZ else act.FallbackTZ # tortuous merging logic is tortuous existingActivity.StartTime = self._coalesceDatetime(existingActivity.StartTime, act.StartTime) existingActivity.EndTime = self._coalesceDatetime(existingActivity.EndTime, act.EndTime, knownTz=existingActivity.StartTime.tzinfo) existingActivity.Name = existingActivity.Name if existingActivity.Name else act.Name existingActivity.Notes = existingActivity.Notes if existingActivity.Notes else act.Notes existingActivity.Laps = existingActivity.Laps if len(existingActivity.Laps) > len(act.Laps) else act.Laps existingActivity.Type = ActivityType.PickMostSpecific([existingActivity.Type, act.Type]) existingActivity.Private = existingActivity.Private or act.Private existingActivity.Device = existingActivity.Device or act.Device if act.Stationary is not None: if existingActivity.Stationary is None: existingActivity.Stationary = act.Stationary else: existingActivity.Stationary = existingActivity.Stationary and act.Stationary # Let's be optimistic here else: pass # Nothing to do - existElsewhere is either more speicifc or equivalently indeterminate if act.GPS is not None: if existingActivity.GPS is None: existingActivity.GPS = act.GPS else: existingActivity.GPS = act.GPS or existingActivity.GPS else: pass # Similarly existingActivity.Stats.coalesceWith(act.Stats) serviceDataCollection = dict(act.ServiceDataCollection) serviceDataCollection.update(existingActivity.ServiceDataCollection) existingActivity.ServiceDataCollection = serviceDataCollection existingActivity.UIDs |= act.UIDs # I think this is merited act.UIDs = existingActivity.UIDs # stop the circular inclusion, not that it matters continue if not no_add: self._activities.append(act) def _determineEligibleRecipientServices(self, activity, recipientServices): from tapiriik.auth import User eligibleServices = [] for destinationSvcRecord in recipientServices: if 
self._isServiceExcluded(destinationSvcRecord): logger.info("\t\tExcluded " + destinationSvcRecord.Service.ID) activity.Record.MarkAsNotPresentOn(destinationSvcRecord, self._getServiceExclusionUserException(destinationSvcRecord)) continue # we don't know for sure if it needs to be uploaded, hold off for now flowException = True sources = [[y for y in self._serviceConnections if y._id == x][0] for x in activity.ServiceDataCollection.keys()] for src in sources: if src.Service.ID in WITHDRAWN_SERVICES: continue # They can't see this service to change the configuration. if not User.CheckFlowException(self.user, src, destinationSvcRecord): flowException = False break if flowException: logger.info("\t\tFlow exception for " + destinationSvcRecord.Service.ID) activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.FlowException)) continue destSvc = destinationSvcRecord.Service if destSvc.RequiresConfiguration(destinationSvcRecord): logger.info("\t\t" + destSvc.ID + " not configured") activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.NotConfigured)) continue # not configured, so we won't even try if not destSvc.ReceivesStationaryActivities and activity.Stationary: logger.info("\t\t" + destSvc.ID + " doesn't receive stationary activities") activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.StationaryUnsupported)) continue # Missing this originally, no wonder... # ReceivesNonGPSActivitiesWithOtherSensorData doesn't matter if the activity is stationary. # (and the service accepts stationary activities - guaranteed immediately above) if not activity.Stationary: if not (destSvc.ReceivesNonGPSActivitiesWithOtherSensorData or activity.GPS is not False): logger.info("\t\t" + destSvc.ID + " doesn't receive non-GPS activities") activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.NonGPSUnsupported)) continue if activity.Record.GetFailureCount(destinationSvcRecord) >= destSvc.UploadRetryCount: logger.info("\t\t" + destSvc.ID + " has exceeded upload retry count") # There's already an error in the activity Record, no need to add anything more here continue eligibleServices.append(destinationSvcRecord) return eligibleServices def _accumulateExclusions(self, serviceRecord, exclusions): if type(exclusions) is not list: exclusions = [exclusions] for exclusion in exclusions: identifier = exclusion.Activity.UID if exclusion.Activity else exclusion.ExternalActivityID if not identifier: raise ValueError("Activity excluded with no identifying information") identifier = str(identifier).replace(".", "_") self._syncExclusions[serviceRecord._id][identifier] = {"Message": exclusion.Message, "Activity": str(exclusion.Activity) if exclusion.Activity else None, "ExternalActivityID": exclusion.ExternalActivityID, "Permanent": exclusion.Permanent, "Effective": datetime.utcnow(), "UserException": _packUserException(exclusion.UserException)} def _ensurePartialSyncPollingSubscription(self, conn): if conn.Service.PartialSyncRequiresTrigger and not conn.PartialSyncTriggerSubscribed: if conn.Service.RequiresExtendedAuthorizationDetails and not conn.ExtendedAuthorization: logger.info("No ext auth details, cannot subscribe") return # We (probably) can't subscribe unless we have their credentials. May need to change this down the road. 
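# The subscription attempt below is best-effort: on a ServiceException it is
# logged and skipped, leaving the connection unsubscribed so a later sync run
# will simply try to subscribe again.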
try: conn.Service.SubscribeToPartialSyncTrigger(conn) except ServiceException as e: logger.exception("Failure while subscribing to partial sync trigger") def _primeExtendedAuthDetails(self, conn): if conn.Service.RequiresExtendedAuthorizationDetails: if not hasattr(conn, "ExtendedAuthorization") or not conn.ExtendedAuthorization: extAuthDetails = [x["ExtendedAuthorization"] for x in self._extendedAuthDetails if x["ID"] == conn._id] if not len(extAuthDetails): conn.ExtendedAuthorization = None return # The connection never gets saved in full again, so we can sub these in here at no risk. conn.ExtendedAuthorization = extAuthDetails[0] def _downloadActivityList(self, conn, exhaustive, no_add=False): svc = conn.Service # Bail out as appropriate for the entire account (_syncErrors contains only blocking errors at this point) if [x for x in self._syncErrors[conn._id] if x["Scope"] == ServiceExceptionScope.Account]: raise SynchronizationCompleteException() # ...and for this specific service if [x for x in self._syncErrors[conn._id] if x["Scope"] == ServiceExceptionScope.Service]: logger.info("Service %s is blocked:" % conn.Service.ID) self._excludeService(conn, _unpackUserException([x for x in self._syncErrors[conn._id] if x["Scope"] == ServiceExceptionScope.Service][0])) return if svc.ID in DISABLED_SERVICES or svc.ID in WITHDRAWN_SERVICES: logger.info("Service %s is widthdrawn" % conn.Service.ID) self._excludeService(conn, UserException(UserExceptionType.Other)) return if svc.RequiresExtendedAuthorizationDetails: if not conn.ExtendedAuthorization: logger.info("No extended auth details for " + svc.ID) self._excludeService(conn, UserException(UserExceptionType.MissingCredentials)) return try: logger.info("\tRetrieving list from " + svc.ID) svcActivities, svcExclusions = svc.DownloadActivityList(conn, exhaustive) except (ServiceException, ServiceWarning) as e: # Special-case rate limiting errors thrown during listing # Otherwise, things will melt down when the limit is reached # (lots of users will hit this error, then be marked for full synchronization later) # (but that's not really required) # Though we don't want to play with things if this exception needs to take the place of an earlier, more significant one # # I had previously removed this because I forgot that TriggerExhaustive defaults to true - this exception was *un*setting it # The issue prompting that change stemmed more from the fact that the rate-limiting errors were being marked as blocking, # ...not that they were getting marked as *not* triggering exhaustive synchronization if e.UserException and e.UserException.Type == UserExceptionType.RateLimited: e.TriggerExhaustive = conn._id in self._hasTransientSyncErrors and self._hasTransientSyncErrors[conn._id] self._syncErrors[conn._id].append(_packServiceException(SyncStep.List, e)) self._excludeService(conn, e.UserException) if not _isWarning(e): return except Exception as e: self._syncErrors[conn._id].append({"Step": SyncStep.List, "Message": _formatExc()}) self._excludeService(conn, UserException(UserExceptionType.ListingError)) return self._accumulateExclusions(conn, svcExclusions) self._accumulateActivities(conn, svcActivities, no_add=no_add) def _estimateFallbackTZ(self, activities): from collections import Counter # With the hope that the majority of the activity records returned will have TZs, and the user's current TZ will constitute the majority. 
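# e.g. offsets of [-300, -300, -300, 60] minutes yield a mode of -300, so the
# fallback becomes pytz.FixedOffset(-300), i.e. UTC-05:00; if no activity
# carries a TZ at all, the user's stored "Timezone" (when present) is used.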
TZOffsets = [x.StartTime.utcoffset().total_seconds() / 60 for x in activities if x.TZ is not None] mode = Counter(TZOffsets).most_common(1) if not len(mode): if "Timezone" in self.user: return pytz.timezone(self.user["Timezone"]) return None return pytz.FixedOffset(mode[0][0]) def _applyFallbackTZ(self): # Attempt to assign fallback TZs to all stationary/potentially-stationary activities, since we may not be able to determine TZ any other way. fallbackTZ = self._estimateFallbackTZ(self._activities) if fallbackTZ: logger.info("Setting fallback TZs to %s" % fallbackTZ ) for act in self._activities: act.FallbackTZ = fallbackTZ def _updateSynchronizedActivities(self, activity): # Locally mark this activity as present on the appropriate services. # These needs to happen regardless of whether the activity is going to be synchronized. # Before, I had moved this under all the eligibility/recipient checks, but that could cause persistent duplicate self._activities when the user had already manually uploaded the same activity to multiple sites. updateServicesWithExistingActivity = False for serviceWithExistingActivityId in activity.ServiceDataCollection.keys(): serviceWithExistingActivity = [x for x in self._serviceConnections if x._id == serviceWithExistingActivityId][0] if not hasattr(serviceWithExistingActivity, "SynchronizedActivities") or not (activity.UIDs <= set(serviceWithExistingActivity.SynchronizedActivities)): updateServicesWithExistingActivity = True break if updateServicesWithExistingActivity: logger.debug("\t\tUpdating SynchronizedActivities") db.connections.update({"_id": {"$in": list(activity.ServiceDataCollection.keys())}}, {"$addToSet": {"SynchronizedActivities": {"$each": list(activity.UIDs)}}}, multi=True) def _updateActivityRecordInitialPrescence(self, activity): for connWithExistingActivityId in activity.ServiceDataCollection.keys(): connWithExistingActivity = [x for x in self._serviceConnections if x._id == connWithExistingActivityId][0] activity.Record.MarkAsPresentOn(connWithExistingActivity) for conn in self._serviceConnections: if hasattr(conn, "SynchronizedActivities") and len([x for x in activity.UIDs if x in conn.SynchronizedActivities]): activity.Record.MarkAsPresentOn(conn) def _syncActivityRedisKey(user): return "recent-sync:%s" % user["_id"] def _pushRecentSyncActivity(self, activity, destinations): key = SynchronizationTask._syncActivityRedisKey(self.user) redis.lpush(key, json.dumps({"Name": activity.Name, "StartTime": activity.StartTime.isoformat(), "Type": activity.Type, "Timestamp": datetime.utcnow().isoformat(), "Destinations": destinations})) redis.ltrim(key, 0, 4) # Only keep 5 def RecentSyncActivity(user): return [json.loads(x.decode("UTF-8")) for x in redis.lrange(SynchronizationTask._syncActivityRedisKey(user), 0, 4)] def _downloadActivity(self, activity): act = None actAvailableFromSvcIds = activity.ServiceDataCollection.keys() actAvailableFromSvcs = [[x for x in self._serviceConnections if x._id == dlSvcRecId][0] for dlSvcRecId in actAvailableFromSvcIds] servicePriorityList = Service.PreferredDownloadPriorityList() actAvailableFromSvcs.sort(key=lambda x: servicePriorityList.index(x.Service)) # TODO: redo this, it was completely broken: # Prefer retrieving the activity from its original source. 
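# The loop below walks the candidate services in preferred-download-priority
# order and stops at the first one that yields a copy which downloads cleanly
# and passes CheckSanity(); per-service failures only mark that service and the
# next candidate is tried.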
for dlSvcRecord in actAvailableFromSvcs: dlSvc = dlSvcRecord.Service logger.info("\tfrom " + dlSvc.ID) if activity.UID in self._syncExclusions[dlSvcRecord._id]: activity.Record.MarkAsNotPresentOtherwise(_unpackUserException(self._syncExclusions[dlSvcRecord._id][activity.UID])) logger.info("\t\t...has activity exclusion logged") continue if self._isServiceExcluded(dlSvcRecord): activity.Record.MarkAsNotPresentOtherwise(self._getServiceExclusionUserException(dlSvcRecord)) logger.info("\t\t...service became excluded after listing") # Because otherwise we'd never have been trying to download from it in the first place. continue if activity.Record.GetFailureCount(dlSvcRecord) >= dlSvc.DownloadRetryCount: # We don't re-call MarkAsNotPresentOtherwise here # ...since its existing value will be the more illuminating as to the error # (and we can just check the failure count if we want to know if it's being ignored) logger.info("\t\t...download retry count exceeded") continue workingCopy = copy.copy(activity) # we can hope # Load in the service data in the same place they left it. workingCopy.ServiceData = workingCopy.ServiceDataCollection[dlSvcRecord._id] if dlSvcRecord._id in workingCopy.ServiceDataCollection else None try: workingCopy = dlSvc.DownloadActivity(dlSvcRecord, workingCopy) except (ServiceException, ServiceWarning) as e: if not _isWarning(e): # Persist the exception if we just exceeded the failure count # (but not if a more useful blocking exception was provided) activity.Record.IncrementFailureCount(dlSvcRecord) if activity.Record.GetFailureCount(dlSvcRecord) >= dlSvc.DownloadRetryCount and not e.Block and (not e.UserException or e.UserException.Type != UserExceptionType.RateLimited): e.Block = True e.Scope = ServiceExceptionScope.Activity self._syncErrors[dlSvcRecord._id].append(_packServiceException(SyncStep.Download, e)) if e.Block and e.Scope == ServiceExceptionScope.Service: # I can't imagine why the same would happen at the account level, so there's no behaviour to immediately abort the sync in that case. self._excludeService(dlSvcRecord, e.UserException) if not _isWarning(e): activity.Record.MarkAsNotPresentOtherwise(e.UserException) continue except APIExcludeActivity as e: logger.info("\t\texcluded by service: %s" % e.Message) e.Activity = workingCopy self._accumulateExclusions(dlSvcRecord, e) activity.Record.MarkAsNotPresentOtherwise(e.UserException) continue except Exception as e: packed_exc = {"Step": SyncStep.Download, "Message": _formatExc()} activity.Record.IncrementFailureCount(dlSvcRecord) if activity.Record.GetFailureCount(dlSvcRecord) >= dlSvc.DownloadRetryCount: # Blegh, should just make packServiceException work with this packed_exc["Block"] = True packed_exc["Scope"] = ServiceExceptionScope.Activity self._syncErrors[dlSvcRecord._id].append(packed_exc) activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.DownloadError)) continue activity.Record.ResetFailureCount(dlSvcRecord) if workingCopy.Private and not dlSvcRecord.GetConfiguration()["sync_private"]: logger.info("\t\t...is private and restricted from sync") # Sync exclusion instead? 
activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.Private)) continue try: workingCopy.CheckSanity() except: logger.info("\t\t...failed sanity check") self._accumulateExclusions(dlSvcRecord, APIExcludeActivity("Sanity check failed " + _formatExc(), activity=workingCopy, user_exception=UserException(UserExceptionType.SanityError))) activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.SanityError)) continue else: act = workingCopy act.SourceConnection = dlSvcRecord break # succesfully got the activity + passed sanity checks, can stop now # If nothing was downloaded at this point, the activity record will show the most recent error - which is fine enough, since only one service is needed to get the activity. return act, dlSvc def _uploadActivity(self, activity, destinationServiceRec): destSvc = destinationServiceRec.Service try: return destSvc.UploadActivity(destinationServiceRec, activity) except (ServiceException, ServiceWarning) as e: if not _isWarning(e): activity.Record.IncrementFailureCount(destinationServiceRec) # The rate-limiting special case here is so that users don't get stranded due to rate limiting issues outside of their control if activity.Record.GetFailureCount(destinationServiceRec) >= destSvc.UploadRetryCount and not e.Block and (not e.UserException or e.UserException.Type != UserExceptionType.RateLimited): e.Block = True e.Scope = ServiceExceptionScope.Activity self._syncErrors[destinationServiceRec._id].append(_packServiceException(SyncStep.Upload, e)) if e.Block and e.Scope == ServiceExceptionScope.Service: # Similarly, no behaviour to immediately abort the sync if an account-level exception is raised self._excludeService(destinationServiceRec, e.UserException) if not _isWarning(e): activity.Record.MarkAsNotPresentOn(destinationServiceRec, e.UserException if e.UserException else UserException(UserExceptionType.UploadError)) raise UploadException() except Exception as e: packed_exc = {"Step": SyncStep.Upload, "Message": _formatExc()} activity.Record.IncrementFailureCount(destinationServiceRec) if activity.Record.GetFailureCount(destinationServiceRec) >= destSvc.UploadRetryCount: packed_exc["Block"] = True packed_exc["Scope"] = ServiceExceptionScope.Activity self._syncErrors[destinationServiceRec._id].append(packed_exc) activity.Record.MarkAsNotPresentOn(destinationServiceRec, UserException(UserExceptionType.UploadError)) raise UploadException() activity.Record.ResetFailureCount(destinationServiceRec) def Run(self, exhaustive=False, null_next_sync_on_unlock=False, heartbeat_callback=None): from tapiriik.auth import User from tapiriik.services.interchange import ActivityStatisticUnit if len(self.user["ConnectedServices"]) <= 1: return # Done and done! sync_result = SynchronizationTaskResult() self._user_config = User.GetConfiguration(self.user) # Mark this user as in-progress. 
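# _lockUser() stamps the user document with this worker's PID, hostname and a
# start time; _unlockUser() clears the worker field once the run finishes,
# presumably so stale locks can be recognised by their timestamps.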
self._lockUser() # Reset their progress self._updateSyncProgress(SyncStep.List, 0) self._initializeUserLogging() logger.info("Beginning sync for " + str(self.user["_id"]) + "(exhaustive: " + str(exhaustive) + ")") # Sets up serviceConnections self._loadServiceData() self._loadExtendedAuthData() self._activities = [] self._excludedServices = {} self._deferredServices = [] self._persistTriggerServices = {} self._initializePersistedSyncErrorsAndExclusions() self._initializeActivityRecords() try: try: for conn in self._serviceConnections: # If we're not going to be doing anything anyways, stop now if len(self._serviceConnections) - len(self._excludedServices) <= 1: raise SynchronizationCompleteException() self._primeExtendedAuthDetails(conn) logger.info("Ensuring partial sync poll subscription") self._ensurePartialSyncPollingSubscription(conn) if not exhaustive and conn.Service.PartialSyncRequiresTrigger and "TriggerPartialSync" not in conn.__dict__ and not conn.Service.ShouldForcePartialSyncTrigger(conn): logger.info("Service %s has not been triggered" % conn.Service.ID) self._deferredServices.append(conn._id) continue if heartbeat_callback: heartbeat_callback(SyncStep.List) self._updateSyncProgress(SyncStep.List, conn.Service.ID) self._downloadActivityList(conn, exhaustive) self._applyFallbackTZ() # Makes reading the logs much easier. self._activities = sorted(self._activities, key=lambda v: v.StartTime.replace(tzinfo=None), reverse=True) totalActivities = len(self._activities) processedActivities = 0 for activity in self._activities: logger.info(str(activity) + " " + str(activity.UID[:3]) + " from " + str([[y.Service.ID for y in self._serviceConnections if y._id == x][0] for x in activity.ServiceDataCollection.keys()])) logger.info(" Name: %s Notes: %s Distance: %s%s" % (activity.Name[:15] if activity.Name else "", activity.Notes[:15] if activity.Notes else "", activity.Stats.Distance.Value, activity.Stats.Distance.Units)) try: activity.Record = self._findOrCreateActivityRecord(activity) # Make it a member of the activity, to avoid passing it around as a seperate parameter everywhere. self._updateSynchronizedActivities(activity) self._updateActivityRecordInitialPrescence(activity) actAvailableFromConnIds = activity.ServiceDataCollection.keys() actAvailableFromConns = [[x for x in self._serviceConnections if x._id == dlSvcRecId][0] for dlSvcRecId in actAvailableFromConnIds] # Check if this is too soon to synchronize if self._user_config["sync_upload_delay"]: endtime = activity.EndTime tz = endtime.tzinfo if not tz and activity.FallbackTZ: tz = activity.FallbackTZ endtime = tz.localize(endtime) if tz and endtime: # We can't really know for sure otherwise time_past = (datetime.utcnow() - endtime.astimezone(pytz.utc).replace(tzinfo=None)) time_past += tz.dst(endtime.replace(tzinfo=None)) if tz.dst(endtime.replace(tzinfo=None)) else timedelta(0) # For some reason DST wasn't being taken into account - maybe just GC? 
time_remaining = timedelta(seconds=self._user_config["sync_upload_delay"]) - time_past logger.debug(" %s since upload" % time_past) if time_remaining > timedelta(0): activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.Deferred)) # Only reschedule if it won't slow down their auto-sync timing if time_remaining < (Sync.SyncInterval + Sync.SyncIntervalJitter): next_sync = datetime.utcnow() + time_remaining # Reschedule them so this activity syncs immediately on schedule sync_result.ForceScheduleNextSyncOnOrBefore(next_sync) logger.info("\t\t...is delayed for %s (out of %s)" % (time_remaining, timedelta(seconds=self._user_config["sync_upload_delay"]))) # We need to ensure we check these again when the sync re-runs for conn in actAvailableFromConns: self._persistServiceTrigger(conn) raise ActivityShouldNotSynchronizeException() if self._user_config["sync_skip_before"]: if activity.StartTime.replace(tzinfo=None) < self._user_config["sync_skip_before"]: logger.info("\t\t...predates configured sync window") activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.PredatesWindow)) raise ActivityShouldNotSynchronizeException() # We don't always know if the activity is private before it's downloaded, but we can check anyways since it saves a lot of time. if activity.Private: override_private = False for conn in actAvailableFromConns: if conn.GetConfiguration()["sync_private"]: override_private = True break if not override_private: logger.info("\t\t...is private and restricted from sync (pre-download)") # Sync exclusion instead? activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.Private)) raise ActivityShouldNotSynchronizeException() recipientServices = None eligibleServices = None while True: # recipientServices are services that don't already have this activity recipientServices = self._determineRecipientServices(activity) if len(recipientServices) == 0: totalActivities -= 1 # doesn't count raise ActivityShouldNotSynchronizeException() # eligibleServices are services that are permitted to receive this activity - taking into account flow exceptions, excluded services, unfufilled configuration requirements, etc. eligibleServices = self._determineEligibleRecipientServices(activity=activity, recipientServices=recipientServices) if not len(eligibleServices): logger.info("\t\t...has no eligible destinations") totalActivities -= 1 # Again, doesn't really count. raise ActivityShouldNotSynchronizeException() has_deferred = False for conn in eligibleServices: if conn._id in self._deferredServices: logger.info("Doing deferred list from %s" % conn.Service.ID) # no_add since... 
# a) we're iterating over the list it'd be adding to, and who knows what will happen then # b) for the current use of deferred services, we don't care about new activities self._downloadActivityList(conn, exhaustive, no_add=True) self._deferredServices.remove(conn._id) has_deferred = True # If we had deferred listing activities from a service, we have to repeat this loop to consider the new info # Otherwise, once was enough if not has_deferred: break # This is after the above exit points since they're the most frequent (& cheapest) cases - want to avoid DB churn if heartbeat_callback: heartbeat_callback(SyncStep.Download) if processedActivities == 0: syncProgress = 0 elif totalActivities <= 0: syncProgress = 1 else: syncProgress = max(0, min(1, processedActivities / totalActivities)) self._updateSyncProgress(SyncStep.Download, syncProgress) # The second most important line of logging in the application... logger.info("\t\t...to " + str([x.Service.ID for x in recipientServices])) # Download the full activity record full_activity, activitySource = self._downloadActivity(activity) if full_activity is None: # couldn't download it from anywhere, or the places that had it said it was broken # The activity record gets updated in _downloadActivity processedActivities += 1 # we tried raise ActivityShouldNotSynchronizeException() full_activity.CleanStats() full_activity.CleanWaypoints() try: full_activity.EnsureTZ() except Exception as e: logger.error("\tCould not determine TZ %s" % e) self._accumulateExclusions(full_activity.SourceConnection, APIExcludeActivity("Could not determine TZ", activity=full_activity, permanent=False)) activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.UnknownTZ)) raise ActivityShouldNotSynchronizeException() else: logger.debug("\tDetermined TZ %s" % full_activity.TZ) try: full_activity.CheckTimestampSanity() except ValueError as e: logger.warning("\t\t...failed timestamp sanity check - %s" % e) # self._accumulateExclusions(full_activity.SourceConnection, APIExcludeActivity("Timestamp sanity check failed", activity=full_activity, permanent=True)) # activity.Record.MarkAsNotPresentOtherwise(UserException(UserExceptionType.SanityError)) # raise ActivityShouldNotSynchronizeException() activity.Record.SetActivity(activity) # Update with whatever more accurate information we may have. full_activity.Record = activity.Record # Some services don't return the same object, so this gets lost, which is meh, but... successful_destination_service_ids = [] for destinationSvcRecord in eligibleServices: if heartbeat_callback: heartbeat_callback(SyncStep.Upload) destSvc = destinationSvcRecord.Service if not destSvc.ReceivesStationaryActivities and full_activity.Stationary: logger.info("\t\t...marked as stationary during download") activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.StationaryUnsupported)) continue if not full_activity.Stationary: if not (destSvc.ReceivesNonGPSActivitiesWithOtherSensorData or full_activity.GPS): logger.info("\t\t...marked as non-GPS during download") activity.Record.MarkAsNotPresentOn(destinationSvcRecord, UserException(UserExceptionType.NonGPSUnsupported)) continue uploaded_external_id = None logger.info("\t Uploading to " + destSvc.ID) try: uploaded_external_id = self._uploadActivity(full_activity, destinationSvcRecord) except UploadException: continue # At this point it's already been added to the error collection, so we can just bail. 
logger.info("\t Uploaded") activity.Record.MarkAsSynchronizedTo(destinationSvcRecord) successful_destination_service_ids.append(destSvc.ID) if uploaded_external_id: # record external ID, for posterity (and later debugging) db.uploaded_activities.insert({"ExternalID": uploaded_external_id, "Service": destSvc.ID, "UserExternalID": destinationSvcRecord.ExternalID, "Timestamp": datetime.utcnow()}) # flag as successful db.connections.update({"_id": destinationSvcRecord._id}, {"$addToSet": {"SynchronizedActivities": {"$each": list(activity.UIDs)}}}) db.sync_stats.update({"ActivityID": activity.UID}, {"$addToSet": {"DestinationServices": destSvc.ID, "SourceServices": activitySource.ID}, "$set": {"Distance": activity.Stats.Distance.asUnits(ActivityStatisticUnit.Meters).Value, "Timestamp": datetime.utcnow()}}, upsert=True) if len(successful_destination_service_ids): self._pushRecentSyncActivity(full_activity, successful_destination_service_ids) del full_activity processedActivities += 1 except ActivityShouldNotSynchronizeException: continue finally: del activity except SynchronizationCompleteException: # This gets thrown when there is obviously nothing left to do - but we still need to clean things up. logger.info("SynchronizationCompleteException thrown") logger.info("Writing back service data") self._writeBackSyncErrorsAndExclusions() if exhaustive: # Clean up potentially orphaned records, since we know everything is here. logger.info("Clearing old activity records") self._dropUntouchedActivityRecords() logger.info("Writing back activity records") self._writeBackActivityRecords() logger.info("Finalizing") # Clear non-persisted extended auth details. self._destroyExtendedAuthData() logger.info("Unlocking user") # Unlock the user. self._unlockUser() except: # oops. logger.exception("Core sync exception") raise else: logger.info("Finished sync for %s (worker %d)" % (self.user["_id"], os.getpid())) finally: self._closeUserLogging() return sync_result class SynchronizationTaskResult: def __init__(self, force_next_sync=None): self.ForceNextSync = force_next_sync def ForceScheduleNextSyncOnOrBefore(self, next_sync): self.ForceNextSync = self.ForceNextSync if self.ForceNextSync and self.ForceNextSync < next_sync else next_sync class UploadException(Exception): pass class ActivityShouldNotSynchronizeException(Exception): pass class SynchronizationCompleteException(Exception): pass class SyncStep: List = "list" Download = "download" Upload = "upload"
""" Copyright (c) 2015, Baidu.com, Inc. All Rights Reserved Use of this source code is governed by a BSD-style license that can be found in the LICENSE file. """ import subprocess import filecmp import os import nose.tools import json from conf import const def print_debug_msg(sid=0, msg=""): """ provide general print interface """ print "@%d======================%s" % (sid, msg) def execute_and_check_returncode(cmd, code): print(cmd) ret = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) ret.communicate() nose.tools.assert_equal(ret.returncode, code) def exe_and_check_res(cmd): """ execute cmd and check result """ print cmd ret = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) nose.tools.assert_equal(ret.stderr.readlines(), []) def clear_env(): """ clear env """ print_debug_msg(4, "delete table_test001 and table_test002, clear env") cmd = "./teracli disable table_test001" exe_and_check_res(cmd) cmd = "./teracli drop table_test001" exe_and_check_res(cmd) cmd = "./teracli disable table_test002" exe_and_check_res(cmd) cmd = "./teracli drop table_test002" exe_and_check_res(cmd) def cleanup(): ret = subprocess.Popen(const.teracli_binary + ' disable test', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) ret = subprocess.Popen(const.teracli_binary + ' drop test', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) files = os.listdir('.') for f in files: if f.endswith('.out'): os.remove(f) def cluster_op(op): if op == 'kill': print 'kill cluster' ret = subprocess.Popen(const.kill_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) elif op == 'launch': print 'launch cluster' ret = subprocess.Popen(const.launch_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) elif op == 'launch_ts_first': print 'launch cluster' ret = subprocess.Popen(const.launch_ts_first_script, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) else: print 'unknown argument' nose.tools.assert_true(False) def create_kv_table(): print 'create kv table' cleanup() ret = subprocess.Popen(const.teracli_binary + ' create test', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def create_singleversion_table(): print 'create single version table' cleanup() ret = subprocess.Popen(const.teracli_binary + ' create "test{cf0, cf1}"', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def create_multiversion_table(): print 'create multi version table' cleanup() ret = subprocess.Popen(const.teracli_binary + ' create "test{cf0<maxversions=20>, cf1<maxversions=20>}"', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def createbyfile(schema, deli=''): """ This function creates a table according to a specified schema :param schema: schema file path :param deli: deli file path :return: None """ cleanup() create_cmd = '{teracli} createbyfile {schema} {deli}'.format(teracli=const.teracli_binary, schema=schema, deli=deli) print 
create_cmd ret = subprocess.Popen(create_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def rowread_table(table_name, file_path): allv = 'scan' tmpfile = 'tmp.file' scan_cmd = '{teracli} {op} {table_name} "" "" > {out}'.format( teracli=const.teracli_binary, op=allv, table_name=table_name, out=tmpfile) print scan_cmd ret = subprocess.Popen(scan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) ret.communicate() tmpfile2 = 'tmp.file2' awk_args = '' awk_args += """-F ':' '{print $1}'""" awk_cmd = 'awk {args} {out} |sort -u > {out1}'.format( args=awk_args, out=tmpfile, out1=tmpfile2) print awk_cmd ret = subprocess.Popen(awk_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) ret.communicate() rowread_cmd = 'while read line; do {teracli} get {table_name} $line; done < {out1} > {output}'.format( teracli=const.teracli_binary, table_name=table_name, out1=tmpfile2, output=file_path) ret = subprocess.Popen(rowread_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) ret.communicate() #ret = subprocess.Popen('rm -rf tmp.file tmp.file2', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) #ret.communicate() def run_tera_mark(file_path, op, table_name, random, value_size, num, key_size, cf='', key_seed=1, value_seed=1): """ This function provide means to write data into Tera and dump a copy into a specified file at the same time. :param file_path: a copy of data will be dumped into file_path for future use :param op: ['w' | 'd'], 'w' indicates write and 'd' indicates delete :param table_name: table name :param random: ['random' | 'seq'] :param value_size: value size in Bytes :param num: entry number :param key_size: key size in Bytes :param cf: cf list, e.g. 'cf0:qual,cf1:flag'. Empty cf list for kv mode. 
Notice: no space in between :param key_seed: seed for random key generator :param value_seed: seed for random value generator :return: None """ # write data into Tera tera_bench_args = "" awk_args = "" if cf == '': # kv mode tera_bench_args += """--compression_ratio=1 --key_seed={kseed} --value_seed={vseed} """\ """ --value_size={vsize} --num={num} --benchmarks={random} """\ """ --key_size={ksize} """.format(kseed=key_seed, vseed=value_seed, vsize=value_size, num=num, random=random, ksize=key_size) if op == 'd': # delete awk_args += """-F '\t' '{print $1}'""" else: # write awk_args += """-F '\t' '{print $1"\t"$2}'""" else: # table tera_bench_args += """--cf={cf} --compression_ratio=1 --key_seed={kseed} --value_seed={vseed} """\ """ --value_size={vsize} --num={num} --benchmarks={random} """\ """ --key_size={ksize} """.format(cf=cf, kseed=key_seed, vseed=value_seed, vsize=value_size, num=num, random=random, ksize=key_size) if op == 'd': # delete awk_args += """-F '\t' '{print $1"\t"$3"\t"$4}'""" else: # write awk_args += """-F '\t' '{print $1"\t"$2"\t"$3"\t"$4}'""" tera_mark_args = """--mode={op} --tablename={table_name} --type=async """\ """ --verify=false""".format(op=op, table_name=table_name) cmd = '{tera_bench} {bench_args} | awk {awk_args} | {tera_mark} {mark_args}'.format( tera_bench=const.tera_bench_binary, bench_args=tera_bench_args, awk_args=awk_args, tera_mark=const.tera_mark_binary, mark_args=tera_mark_args) print cmd ret = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) # write/append data to a file for comparison for path, is_append in file_path: if cf == '': awk_args = """-F '\t' '{print $1"::0:"$2}'""" else: awk_args = """-F '\t' '{print $1":"$3":"$4":"$2}'""" redirect_op = '' if is_append is True: redirect_op += '>>' else: redirect_op += '>' dump_cmd = '{tera_bench} {tera_bench_args} | awk {awk_args} {redirect_op} {out}'.format( tera_bench=const.tera_bench_binary, tera_bench_args=tera_bench_args, redirect_op=redirect_op, awk_args=awk_args, out=path) print dump_cmd ret = subprocess.Popen(dump_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def scan_table(table_name, file_path, allversion, snapshot=0, is_async=False): """ This function scans the table and write the output into file_path :param table_name: table name :param file_path: write scan output into file_path :param allversion: [True | False] :param is_async: True for batch scan """ allv = '' if allversion is True: allv += 'scanallv' else: allv += 'scan' if is_async is True: async_flag = '--tera_sdk_scan_async_enabled=true --v=30 --tera_client_scan_async_enabled=true' else: async_flag = '--tera_sdk_scan_async_enabled=false' snapshot_args = '' if snapshot != 0: snapshot_args += '--snapshot={snapshot}'.format(snapshot=snapshot) scan_cmd = '{teracli} {flags} {op} {table_name} "" "" {snapshot} > {out}'.format( teracli=const.teracli_binary, flags=async_flag, op=allv, table_name=table_name, snapshot=snapshot_args, out=file_path) print scan_cmd ret = subprocess.Popen(scan_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def get_tablet_list(table_name): # TODO: need a more elegant & general way to obtain tablet info show_cmd = '{teracli} show {table}'.format(teracli=const.teracli_binary, table=table_name) print show_cmd ret = 
subprocess.Popen(show_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) tablet_info = ret.stdout.readlines()[5:] # tablet info starts from the 6th line tablet_info = filter(lambda x: x != '\n', tablet_info) tablet_paths = [] for tablet in tablet_info: comp = filter(None, tablet.split(' ')) tablet_paths.append(comp[2]) return tablet_paths def parse_showinfo(): ''' if you want to get show info, you can call this function to return with a dict ''' show_cmd = '{teracli} show'.format(teracli=const.teracli_binary) print show_cmd ret = subprocess.Popen(show_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) table_info = ret.stdout.readlines()[2:-1] retinfo = {} for line in table_info: line = line.strip("\n") line_list = line.split(" ") list_ret = [line_list[i] for i in range(len(line_list)) if line_list[i] != ""] retinfo[list_ret[1]] = {} retinfo[list_ret[1]]["status"] = list_ret[2] retinfo[list_ret[1]]["size"] = list_ret[3] retinfo[list_ret[1]]["lg_size"] = [list_ret[j] for j in range(4, len(list_ret) - 2)] retinfo[list_ret[1]]["tablet"] = list_ret[len(list_ret) - 2] retinfo[list_ret[1]]["busy"] = list_ret[len(list_ret) - 1] print json.dumps(retinfo) return retinfo def compact_tablets(tablet_list): # TODO: compact may timeout for tablet in tablet_list: compact_cmd = '{teracli} tablet compact {tablet}'.format(teracli=const.teracli_binary, tablet=tablet) print compact_cmd ret = subprocess.Popen(compact_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) def snapshot_op(table_name): """ This function creates | deletes a snapshot :param table_name: table name :return: snapshot id on success, None otherwise """ # TODO: delete snapshot snapshot_cmd = '{teracli} snapshot {table_name} create'.format(teracli=const.teracli_binary, table_name=table_name) print snapshot_cmd ret = subprocess.Popen(snapshot_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) out = ret.stdout.readlines() ret = '' try: ret += out[1] except IndexError: return None if ret.startswith('new snapshot: '): snapshot_id = ret[len('new snapshot: '):-1] if snapshot_id.isdigit(): return int(snapshot_id) return None def rollback_op(table_name, snapshot, rollback_name): """ Invoke rollback action :param table_name: table name :param snapshot: rollback to a specific snapshot :return: None """ rollback_cmd = '{teracli} snapshot {table_name} rollback --snapshot={snapshot} --rollback_name={rname}'.\ format(teracli=const.teracli_binary, table_name=table_name, snapshot=snapshot, rname=rollback_name) print rollback_cmd ret = subprocess.Popen(rollback_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) def compare_files(file1, file2, need_sort): """ This function compares two files. 
:param file1: file path to the first file :param file2: file path to the second file :param need_sort: whether the files need to be sorted :return: True if the files are the same, False otherwise """ if need_sort is True: sort_cmd = 'sort {f1} > {f1}.sort; sort {f2} > {f2}.sort'.format(f1=file1, f2=file2) print sort_cmd ret = subprocess.Popen(sort_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) print ''.join(ret.stdout.readlines()) print ''.join(ret.stderr.readlines()) os.rename(file1+'.sort', file1) os.rename(file2+'.sort', file2) return filecmp.cmp(file1, file2, shallow=False) def file_is_empty(file_path): """ This function tests whether a file is empty :param file_path: file path :return: True if the file is empty, False otherwise """ return not os.path.getsize(file_path) def cleanup_files(file_list): for file_path in file_list: os.remove(file_path) def check_show_user_result(cmd, should_contain, substr): print(cmd) ret = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) stdoutdata = ''.join(ret.stdout.readlines()) if should_contain: nose.tools.assert_true(substr in stdoutdata) else: nose.tools.assert_true(substr not in stdoutdata)
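The helpers above are normally combined into a write-then-verify test: load data with run_tera_mark while keeping a local dump, scan the table back, and diff the two outputs. The sketch below is illustrative only and not part of the original utilities; the table name 'test', the file names 'dump.out'/'scan.out', and the cf list 'cf0:q,cf1:q' are assumptions for the example, and it relies on the `const` module being configured as elsewhere in this file.
def example_write_scan_and_verify():
    """Illustrative sketch: write 100 rows, scan them back, compare the dumps."""
    create_singleversion_table()
    # write data and keep a local copy in dump.out for later comparison
    run_tera_mark([('dump.out', False)], op='w', table_name='test', random='random',
                  value_size=100, num=100, key_size=20, cf='cf0:q,cf1:q')
    # scan the table into scan.out and diff against the dump
    scan_table(table_name='test', file_path='scan.out', allversion=False)
    nose.tools.assert_true(compare_files('dump.out', 'scan.out', need_sort=True))
    cleanup_files(['dump.out', 'scan.out'])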
from sympy import ( symbols, log, ln, Float, nan, oo, zoo, I, pi, E, exp, Symbol, LambertW, sqrt, Rational, expand_log, S, sign, conjugate, refine, sin, cos, sinh, cosh, tanh, exp_polar, re, Function, simplify, AccumBounds) def test_exp_values(): x, y = symbols('x,y') k = Symbol('k', integer=True) assert exp(nan) == nan assert exp(oo) == oo assert exp(-oo) == 0 assert exp(0) == 1 assert exp(1) == E assert exp(-1 + x).as_base_exp() == (S.Exp1, x - 1) assert exp(1 + x).as_base_exp() == (S.Exp1, x + 1) assert exp(pi*I/2) == I assert exp(pi*I) == -1 assert exp(3*pi*I/2) == -I assert exp(2*pi*I) == 1 assert refine(exp(pi*I*2*k)) == 1 assert refine(exp(pi*I*2*(k + Rational(1, 2)))) == -1 assert refine(exp(pi*I*2*(k + Rational(1, 4)))) == I assert refine(exp(pi*I*2*(k + Rational(3, 4)))) == -I assert exp(log(x)) == x assert exp(2*log(x)) == x**2 assert exp(pi*log(x)) == x**pi assert exp(17*log(x) + E*log(y)) == x**17 * y**E assert exp(x*log(x)) != x**x assert exp(sin(x)*log(x)) != x assert exp(3*log(x) + oo*x) == exp(oo*x) * x**3 assert exp(4*log(x)*log(y) + 3*log(x)) == x**3 * exp(4*log(x)*log(y)) def test_exp_log(): x = Symbol("x", real=True) assert log(exp(x)) == x assert exp(log(x)) == x assert log(x).inverse() == exp assert exp(x).inverse() == log y = Symbol("y", polar=True) z = Symbol("z") assert log(exp_polar(z)) == z assert exp(log(y)) == y def test_exp_expand(): x = Symbol("x") y = Symbol("y") e = exp(log(Rational(2))*(1 + x) - log(Rational(2))*x) assert e.expand() == 2 assert exp(x + y) != exp(x)*exp(y) assert exp(x + y).expand() == exp(x)*exp(y) def test_exp__as_base_exp(): x, y = symbols('x,y') assert exp(x).as_base_exp() == (E, x) assert exp(2*x).as_base_exp() == (E, 2*x) assert exp(x*y).as_base_exp() == (E, x*y) assert exp(-x).as_base_exp() == (E, -x) # Pow( *expr.as_base_exp() ) == expr invariant should hold assert E**x == exp(x) assert E**(2*x) == exp(2*x) assert E**(x*y) == exp(x*y) assert exp(x).base is S.Exp1 assert exp(x).exp == x def test_exp_infinity(): y = Symbol('y') assert exp(I*y) != nan assert refine(exp(I*oo)) == nan assert refine(exp(-I*oo)) == nan assert exp(y*I*oo) != nan def test_exp_subs(): x, y = symbols('x,y') e = (exp(3*log(x), evaluate=False)) # evaluates to x**3 assert e.subs(x**3, y**3) == e assert e.subs(x**2, 5) == e assert (x**3).subs(x**2, y) != y**(3/S(2)) assert exp(exp(x) + exp(x**2)).subs(exp(exp(x)), y) == y * exp(exp(x**2)) assert exp(x).subs(E, y) == y**x x = symbols('x', real=True) assert exp(5*x).subs(exp(7*x), y) == y**Rational(5, 7) assert exp(2*x + 7).subs(exp(3*x), y) == y**Rational(2, 3) * exp(7) x = symbols('x', positive=True) assert exp(3*log(x)).subs(x**2, y) == y**Rational(3, 2) # differentiate between E and exp assert exp(exp(x + E)).subs(exp, 3) == 3**(3**(x + E)) assert exp(exp(x + E)).subs(E, 3) == 3**(3**(x + 3)) assert exp(3).subs(E, sin) == sin(3) def test_exp_conjugate(): x = Symbol('x') assert conjugate(exp(x)) == exp(conjugate(x)) def test_exp_rewrite(): x = symbols('x') assert exp(x).rewrite(sin) == sinh(x) + cosh(x) assert exp(x*I).rewrite(cos) == cos(x) + I*sin(x) assert exp(1).rewrite(cos) == sinh(1) + cosh(1) assert exp(1).rewrite(sin) == sinh(1) + cosh(1) assert exp(1).rewrite(sin) == sinh(1) + cosh(1) assert exp(x).rewrite(tanh) == (1 + tanh(x/2))/(1 - tanh(x/2)) def test_exp_leading_term(): x = symbols('x') assert exp(x).as_leading_term(x) == 1 assert exp(1/x).as_leading_term(x) == exp(1/x) assert exp(2 + x).as_leading_term(x) == exp(2) def test_exp_taylor_term(): x = symbols('x') assert exp(x).taylor_term(1, x) == x 
assert exp(x).taylor_term(3, x) == x**3/6 def test_log_values(): assert log(nan) == nan assert log(oo) == oo assert log(-oo) == oo assert log(zoo) == zoo assert log(-zoo) == zoo assert log(0) == zoo assert log(1) == 0 assert log(-1) == I*pi assert log(E) == 1 assert log(-E).expand() == 1 + I*pi assert log(pi) == log(pi) assert log(-pi).expand() == log(pi) + I*pi assert log(17) == log(17) assert log(-17) == log(17) + I*pi assert log(I) == I*pi/2 assert log(-I) == -I*pi/2 assert log(17*I) == I*pi/2 + log(17) assert log(-17*I).expand() == -I*pi/2 + log(17) assert log(oo*I) == oo assert log(-oo*I) == oo assert log(0, 2) == zoo assert log(0, 5) == zoo assert exp(-log(3))**(-1) == 3 assert log(S.Half) == -log(2) assert log(2*3).func is log assert log(2*3**2).func is log def test_log_base(): assert log(1, 2) == 0 assert log(2, 2) == 1 assert log(3, 2) == log(3)/log(2) assert log(6, 2) == 1 + log(3)/log(2) assert log(6, 3) == 1 + log(2)/log(3) assert log(2**3, 2) == 3 assert log(3**3, 3) == 3 assert log(5, 1) == zoo assert log(1, 1) == nan assert log(Rational(2, 3), 10) == (-log(3) + log(2))/log(10) assert log(Rational(2, 3), Rational(1, 3)) == -log(2)/log(3) + 1 assert log(Rational(2, 3), Rational(2, 5)) == \ (-log(3) + log(2))/(-log(5) + log(2)) def test_log_symbolic(): x, y = symbols('x,y') assert log(x, exp(1)) == log(x) assert log(exp(x)) != x assert log(x, exp(1)) == log(x) assert log(x*y) != log(x) + log(y) assert log(x/y).expand() != log(x) - log(y) assert log(x/y).expand(force=True) == log(x) - log(y) assert log(x**y).expand() != y*log(x) assert log(x**y).expand(force=True) == y*log(x) assert log(x, 2) == log(x)/log(2) assert log(E, 2) == 1/log(2) p, q = symbols('p,q', positive=True) r = Symbol('r', real=True) assert log(p**2) != 2*log(p) assert log(p**2).expand() == 2*log(p) assert log(x**2).expand() != 2*log(x) assert log(p**q) != q*log(p) assert log(exp(p)) == p assert log(p*q) != log(p) + log(q) assert log(p*q).expand() == log(p) + log(q) assert log(-sqrt(3)) == log(sqrt(3)) + I*pi assert log(-exp(p)) != p + I*pi assert log(-exp(x)).expand() != x + I*pi assert log(-exp(r)).expand() == r + I*pi assert log(x**y) != y*log(x) assert (log(x**-5)**-1).expand() != -1/log(x)/5 assert (log(p**-5)**-1).expand() == -1/log(p)/5 assert log(-x).func is log and log(-x).args[0] == -x assert log(-p).func is log and log(-p).args[0] == -p def test_exp_assumptions(): x = Symbol('x') r = Symbol('r', real=True) i = Symbol('i', imaginary=True) for e in exp, exp_polar: assert e(x).is_real is None assert e(x).is_imaginary is None assert e(i).is_real is None assert e(i).is_imaginary is None assert e(r).is_real is True assert e(r).is_imaginary is False assert e(re(x)).is_real is True assert e(re(x)).is_imaginary is False assert exp(0, evaluate=False).is_algebraic a = Symbol('a', algebraic=True) an = Symbol('an', algebraic=True, nonzero=True) r = Symbol('r', rational=True) rn = Symbol('rn', rational=True, nonzero=True) assert exp(a).is_algebraic is None assert exp(an).is_algebraic is False assert exp(pi*r).is_algebraic is None assert exp(pi*rn).is_algebraic is False def test_exp_AccumBounds(): assert exp(AccumBounds(1, 2)) == AccumBounds(E, E**2) def test_log_assumptions(): p = symbols('p', positive=True) n = symbols('n', negative=True) z = symbols('z', zero=True) x = symbols('x', infinite=True, positive=True) assert log(z).is_positive is False assert log(x).is_positive is True assert log(2) > 0 assert log(1, evaluate=False).is_zero assert log(1 + z).is_zero assert log(p).is_zero is None assert log(n).is_zero is 
False assert log(0.5).is_negative is True assert log(exp(p) + 1).is_positive assert log(1, evaluate=False).is_algebraic assert log(42, evaluate=False).is_algebraic is False assert log(1 + z).is_rational def test_log_hashing(): x = Symbol("y") assert x != log(log(x)) assert hash(x) != hash(log(log(x))) assert log(x) != log(log(log(x))) e = 1/log(log(x) + log(log(x))) assert e.base.func is log e = 1/log(log(x) + log(log(log(x)))) assert e.base.func is log x = Symbol("x") e = log(log(x)) assert e.func is log assert not x.func is log assert hash(log(log(x))) != hash(x) assert e != x def test_log_sign(): assert sign(log(2)) == 1 def test_log_expand_complex(): assert log(1 + I).expand(complex=True) == log(2)/2 + I*pi/4 assert log(1 - sqrt(2)).expand(complex=True) == log(-1 + sqrt(2)) + I*pi def test_log_apply_evalf(): value = (log(3)/log(2) - 1).evalf() assert value.epsilon_eq(Float("0.58496250072115618145373")) def test_log_expand(): w = Symbol("w", positive=True) e = log(w**(log(5)/log(3))) assert e.expand() == log(5)/log(3) * log(w) x, y, z = symbols('x,y,z', positive=True) assert log(x*(y + z)).expand(mul=False) == log(x) + log(y + z) assert log(log(x**2)*log(y*z)).expand() in [log(2*log(x)*log(y) + 2*log(x)*log(z)), log(log(x)*log(z) + log(y)*log(x)) + log(2), log((log(y) + log(z))*log(x)) + log(2)] assert log(x**log(x**2)).expand(deep=False) == log(x)*log(x**2) assert log(x**log(x**2)).expand() == 2*log(x)**2 assert (log(x*(y + z))*(x + y)).expand(mul=True, log=True) == y*log(x) + y*log(y + z) + z*log(x) + z*log(y + z) x, y = symbols('x,y') assert log(x*y).expand(force=True) == log(x) + log(y) assert log(x**y).expand(force=True) == y*log(x) assert log(exp(x)).expand(force=True) == x # there's generally no need to expand out logs since this requires # factoring and if simplification is sought, it's cheaper to put # logs together than it is to take them apart.
assert log(2*3**2).expand() != 2*log(3) + log(2) def test_log_simplify(): x = Symbol("x", positive=True) assert log(x**2).expand() == 2*log(x) assert expand_log(log(x**(2 + log(2)))) == (2 + log(2))*log(x) def test_log_AccumBounds(): assert log(AccumBounds(1, E)) == AccumBounds(0, 1) def test_lambertw(): x = Symbol('x') k = Symbol('k') assert LambertW(x, 0) == LambertW(x) assert LambertW(x, 0, evaluate=False) != LambertW(x) assert LambertW(0) == 0 assert LambertW(E) == 1 assert LambertW(-1/E) == -1 assert LambertW(-log(2)/2) == -log(2) assert LambertW(oo) == oo assert LambertW(0, 1) == -oo assert LambertW(0, 42) == -oo assert LambertW(-pi/2, -1) == -I*pi/2 assert LambertW(-1/E, -1) == -1 assert LambertW(-2*exp(-2), -1) == -2 assert LambertW(x**2).diff(x) == 2*LambertW(x**2)/x/(1 + LambertW(x**2)) assert LambertW(x, k).diff(x) == LambertW(x, k)/x/(1 + LambertW(x, k)) assert LambertW(sqrt(2)).evalf(30).epsilon_eq( Float("0.701338383413663009202120278965", 30), 1e-29) assert re(LambertW(2, -1)).evalf().epsilon_eq(Float("-0.834310366631110")) assert LambertW(-1).is_real is False # issue 5215 assert LambertW(2, evaluate=False).is_real p = Symbol('p', positive=True) assert LambertW(p, evaluate=False).is_real assert LambertW(p - 1, evaluate=False).is_real is None assert LambertW(-p - 2/S.Exp1, evaluate=False).is_real is False assert LambertW(S.Half, -1, evaluate=False).is_real is False assert LambertW(-S.One/10, -1, evaluate=False).is_real assert LambertW(-10, -1, evaluate=False).is_real is False assert LambertW(-2, 2, evaluate=False).is_real is False assert LambertW(0, evaluate=False).is_algebraic na = Symbol('na', nonzero=True, algebraic=True) assert LambertW(na).is_algebraic is False def test_issue_5673(): e = LambertW(-1) assert e.is_comparable is False assert e.is_positive is not True e2 = 1 - 1/(1 - exp(-1000)) assert e2.is_positive is not True e3 = -2 + exp(exp(LambertW(log(2)))*LambertW(log(2))) assert e3.is_nonzero is not True def test_exp_expand_NC(): A, B, C = symbols('A,B,C', commutative=False) x, y, z = symbols('x,y,z') assert exp(A + B).expand() == exp(A + B) assert exp(A + B + C).expand() == exp(A + B + C) assert exp(x + y).expand() == exp(x)*exp(y) assert exp(x + y + z).expand() == exp(x)*exp(y)*exp(z) def test_as_numer_denom(): from sympy.abc import x n = symbols('n', negative=True) assert exp(x).as_numer_denom() == (exp(x), 1) assert exp(-x).as_numer_denom() == (1, exp(x)) assert exp(-2*x).as_numer_denom() == (1, exp(2*x)) assert exp(-2).as_numer_denom() == (1, exp(2)) assert exp(n).as_numer_denom() == (1, exp(-n)) assert exp(-n).as_numer_denom() == (exp(-n), 1) assert exp(-I*x).as_numer_denom() == (1, exp(I*x)) assert exp(-I*n).as_numer_denom() == (1, exp(I*n)) assert exp(-n).as_numer_denom() == (exp(-n), 1) def test_polar(): x, y = symbols('x y', polar=True) z = Symbol('z') assert abs(exp_polar(I*4)) == 1 assert exp_polar(I*10).n() == exp_polar(I*10) assert log(exp_polar(z)) == z assert log(x*y).expand() == log(x) + log(y) assert log(x**z).expand() == z*log(x) assert exp_polar(3).exp == 3 # Compare exp(1.0*pi*I).
assert (exp_polar(1.0*pi*I).n(n=5)).as_real_imag()[1] >= 0 assert exp_polar(0).is_rational is True # issue 8008 def test_log_product(): from sympy.abc import n, m i, j = symbols('i,j', positive=True, integer=True) x, y = symbols('x,y', positive=True) from sympy.concrete import Product, Sum f, g = Function('f'), Function('g') assert simplify(log(Product(x**i, (i, 1, n)))) == Sum(i*log(x), (i, 1, n)) assert simplify(log(Product(x**i*y**j, (i, 1, n), (j, 1, m)))) == \ log(Product(x**i*y**j, (i, 1, n), (j, 1, m))) expr = log(Product(-2, (n, 0, 4))) assert simplify(expr) == expr def test_issue_8866(): x = Symbol('x') assert simplify(log(x, 10, evaluate=False)) == simplify(log(x, 10)) assert expand_log(log(x, 10, evaluate=False)) == expand_log(log(x, 10)) y = Symbol('y', positive=True) l1 = log(exp(y), exp(10)) b1 = log(exp(y), exp(5)) l2 = log(exp(y), exp(10), evaluate=False) b2 = log(exp(y), exp(5), evaluate=False) assert simplify(log(l1, b1)) == simplify(log(l2, b2)) assert expand_log(log(l1, b1)) == expand_log(log(l2, b2)) def test_issue_9116(): n = Symbol('n', positive=True, integer=True) assert ln(n).is_nonnegative is True assert log(n).is_nonnegative is True
import argparse import os import sys import textwrap import configargparse import locust version = locust.__version__ DEFAULT_CONFIG_FILES = ["~/.locust.conf", "locust.conf"] def _is_package(path): """ Is the given path a Python package? """ return os.path.isdir(path) and os.path.exists(os.path.join(path, "__init__.py")) def find_locustfile(locustfile): """ Attempt to locate a locustfile, either explicitly or by searching parent dirs. """ # Obtain env value names = [locustfile] # Create .py version if necessary if not names[0].endswith(".py"): names.append(names[0] + ".py") # Does the name contain path elements? if os.path.dirname(names[0]): # If so, expand home-directory markers and test for existence for name in names: expanded = os.path.expanduser(name) if os.path.exists(expanded): if name.endswith(".py") or _is_package(expanded): return os.path.abspath(expanded) else: # Otherwise, start in cwd and work downwards towards filesystem root path = os.path.abspath(".") while True: for name in names: joined = os.path.join(path, name) if os.path.exists(joined): if name.endswith(".py") or _is_package(joined): return os.path.abspath(joined) parent_path = os.path.dirname(path) if parent_path == path: # we've reached the root path which has been checked this iteration break path = parent_path # Implicit 'return None' if nothing was found def get_empty_argument_parser(add_help=True, default_config_files=DEFAULT_CONFIG_FILES): parser = configargparse.ArgumentParser( default_config_files=default_config_files, add_env_var_help=False, add_config_file_help=False, add_help=add_help, formatter_class=argparse.RawDescriptionHelpFormatter, usage=argparse.SUPPRESS, description=textwrap.dedent( """ Usage: locust [OPTIONS] [UserClass ...] """ ), # epilog="", ) parser.add_argument( "-f", "--locustfile", default="locustfile", help="Python module file to import, e.g. '../other.py'. Default: locustfile", env_var="LOCUST_LOCUSTFILE", ) parser.add_argument("--config", is_config_file_arg=True, help="Config file path") return parser def parse_locustfile_option(args=None): """ Construct a command line parser that is only used to parse the -f argument so that we can import the test scripts in case any of them adds additional command line arguments to the parser """ parser = get_empty_argument_parser(add_help=False) parser.add_argument( "-h", "--help", action="store_true", default=False, ) parser.add_argument( "--version", "-V", action="store_true", default=False, ) options, _ = parser.parse_known_args(args=args) locustfile = find_locustfile(options.locustfile) if not locustfile: if options.help or options.version: # if --help or --version is specified we'll call parse_options which will print the help/version message parse_options(args=args) sys.stderr.write( "Could not find any locustfile! Ensure file ends in '.py' and see --help for available options.\n" ) sys.exit(1) if locustfile == "locust.py": sys.stderr.write("The locustfile must not be named `locust.py`. Please rename the file and try again.\n") sys.exit(1) return locustfile def setup_parser_arguments(parser): """ Setup command-line options Takes a configargparse.ArgumentParser as argument and calls it's add_argument for each of the supported arguments """ parser._optionals.title = "Common options" parser.add_argument( "-H", "--host", help="Host to load test in the following format: http://10.21.32.33", env_var="LOCUST_HOST", ) parser.add_argument( "-u", "--users", type=int, dest="num_users", help="Number of concurrent Locust users. 
Primarily used together with --headless. Can be changed during a test by inputs w, W(spawn 1, 10 users) and s, S(stop 1, 10 users)", env_var="LOCUST_USERS", ) parser.add_argument( "-r", "--spawn-rate", type=float, help="The rate per second in which users are spawned. Primarily used together with --headless", env_var="LOCUST_SPAWN_RATE", ) parser.add_argument( "--hatch-rate", env_var="LOCUST_HATCH_RATE", type=float, default=0, help=configargparse.SUPPRESS, ) parser.add_argument( "-t", "--run-time", help="Stop after the specified amount of time, e.g. (300s, 20m, 3h, 1h30m, etc.). Only used together with --headless. Defaults to run forever.", env_var="LOCUST_RUN_TIME", ) parser.add_argument( "-l", "--list", action="store_true", dest="list_commands", help="Show list of possible User classes and exit", ) web_ui_group = parser.add_argument_group("Web UI options") web_ui_group.add_argument( "--web-host", default="", help="Host to bind the web interface to. Defaults to '*' (all interfaces)", env_var="LOCUST_WEB_HOST", ) web_ui_group.add_argument( "--web-port", "-P", type=int, default=8089, help="Port on which to run web host", env_var="LOCUST_WEB_PORT", ) web_ui_group.add_argument( "--headless", action="store_true", help="Disable the web interface, and instead start the load test immediately. Requires -u and -t to be specified.", env_var="LOCUST_HEADLESS", ) # Override --headless parameter (useful because you cant disable a store_true-parameter like headless once it has been set in a config file) web_ui_group.add_argument( "--headful", action="store_true", help=configargparse.SUPPRESS, env_var="LOCUST_HEADFUL", ) web_ui_group.add_argument( "--web-auth", type=str, dest="web_auth", default=None, help="Turn on Basic Auth for the web interface. Should be supplied in the following format: username:password", env_var="LOCUST_WEB_AUTH", ) web_ui_group.add_argument( "--tls-cert", default="", help="Optional path to TLS certificate to use to serve over HTTPS", env_var="LOCUST_TLS_CERT", ) web_ui_group.add_argument( "--tls-key", default="", help="Optional path to TLS private key to use to serve over HTTPS", env_var="LOCUST_TLS_KEY", ) master_group = parser.add_argument_group( "Master options", "Options for running a Locust Master node when running Locust distributed. A Master node need Worker nodes that connect to it before it can run load tests.", ) # if locust should be run in distributed mode as master master_group.add_argument( "--master", action="store_true", help="Set locust to run in distributed mode with this process as master", env_var="LOCUST_MODE_MASTER", ) master_group.add_argument( "--master-bind-host", default="*", help="Interfaces (hostname, ip) that locust master should bind to. Only used when running with --master. Defaults to * (all available interfaces).", env_var="LOCUST_MASTER_BIND_HOST", ) master_group.add_argument( "--master-bind-port", type=int, default=5557, help="Port that locust master should bind to. Only used when running with --master. Defaults to 5557.", env_var="LOCUST_MASTER_BIND_PORT", ) master_group.add_argument( "--expect-workers", type=int, default=1, help="How many workers master should expect to connect before starting the test (only when --headless used).", env_var="LOCUST_EXPECT_WORKERS", ) master_group.add_argument( "--expect-slaves", action="store_true", help=configargparse.SUPPRESS, ) worker_group = parser.add_argument_group( "Worker options", textwrap.dedent( """ Options for running a Locust Worker node when running Locust distributed. 
Only the LOCUSTFILE (-f option) need to be specified when starting a Worker, since other options such as -u, -r, -t are specified on the Master node. """ ), ) # if locust should be run in distributed mode as worker worker_group.add_argument( "--worker", action="store_true", help="Set locust to run in distributed mode with this process as worker", env_var="LOCUST_MODE_WORKER", ) worker_group.add_argument( "--slave", action="store_true", help=configargparse.SUPPRESS, ) # master host options worker_group.add_argument( "--master-host", default="127.0.0.1", help="Host or IP address of locust master for distributed load testing. Only used when running with --worker. Defaults to 127.0.0.1.", env_var="LOCUST_MASTER_NODE_HOST", metavar="MASTER_NODE_HOST", ) worker_group.add_argument( "--master-port", type=int, default=5557, help="The port to connect to that is used by the locust master for distributed load testing. Only used when running with --worker. Defaults to 5557.", env_var="LOCUST_MASTER_NODE_PORT", metavar="MASTER_NODE_PORT", ) tag_group = parser.add_argument_group( "Tag options", "Locust tasks can be tagged using the @tag decorator. These options let specify which tasks to include or exclude during a test.", ) tag_group.add_argument( "-T", "--tags", nargs="*", metavar="TAG", env_var="LOCUST_TAGS", help="List of tags to include in the test, so only tasks with any matching tags will be executed", ) tag_group.add_argument( "-E", "--exclude-tags", nargs="*", metavar="TAG", env_var="LOCUST_EXCLUDE_TAGS", help="List of tags to exclude from the test, so only tasks with no matching tags will be executed", ) stats_group = parser.add_argument_group("Request statistics options") stats_group.add_argument( "--csv", # Name repeated in 'parse_options' dest="csv_prefix", help="Store current request stats to files in CSV format. Setting this option will generate three files: [CSV_PREFIX]_stats.csv, [CSV_PREFIX]_stats_history.csv and [CSV_PREFIX]_failures.csv", env_var="LOCUST_CSV", ) stats_group.add_argument( "--csv-full-history", # Name repeated in 'parse_options' action="store_true", default=False, dest="stats_history_enabled", help="Store each stats entry in CSV format to _stats_history.csv file. You must also specify the '--csv' argument to enable this.", env_var="LOCUST_CSV_FULL_HISTORY", ) stats_group.add_argument( "--print-stats", action="store_true", help="Print stats in the console", env_var="LOCUST_PRINT_STATS", ) stats_group.add_argument( "--only-summary", action="store_true", help="Only print the summary stats", env_var="LOCUST_ONLY_SUMMARY", ) stats_group.add_argument( "--reset-stats", action="store_true", help="Reset statistics once spawning has been completed. Should be set on both master and workers when running in distributed mode", env_var="LOCUST_RESET_STATS", ) stats_group.add_argument( "--html", dest="html_file", help="Store HTML report file", env_var="LOCUST_HTML", ) log_group = parser.add_argument_group("Logging options") log_group.add_argument( "--skip-log-setup", action="store_true", dest="skip_log_setup", default=False, help="Disable Locust's logging setup. Instead, the configuration is provided by the Locust test or Python defaults.", env_var="LOCUST_SKIP_LOG_SETUP", ) log_group.add_argument( "--loglevel", "-L", default="INFO", help="Choose between DEBUG/INFO/WARNING/ERROR/CRITICAL. Default is INFO.", env_var="LOCUST_LOGLEVEL", ) log_group.add_argument( "--logfile", help="Path to log file. 
If not set, log will go to stdout/stderr", env_var="LOCUST_LOGFILE", ) step_load_group = parser.add_argument_group("Step load options") step_load_group.add_argument("--step-load", action="store_true", help=configargparse.SUPPRESS) step_load_group.add_argument("--step-users", type=int, help=configargparse.SUPPRESS) step_load_group.add_argument("--step-clients", action="store_true", help=configargparse.SUPPRESS) step_load_group.add_argument("--step-time", help=configargparse.SUPPRESS) other_group = parser.add_argument_group("Other options") other_group.add_argument( "--show-task-ratio", action="store_true", help="Print table of the User classes' task execution ratio" ) other_group.add_argument( "--show-task-ratio-json", action="store_true", help="Print json data of the User classes' task execution ratio" ) # optparse gives you --version but we have to do it ourselves to get -V too other_group.add_argument( "--version", "-V", action="version", help="Show program's version number and exit", version="%(prog)s {}".format(version), ) other_group.add_argument( "--exit-code-on-error", type=int, default=1, help="Sets the process exit code to use when a test result contain any failure or error", env_var="LOCUST_EXIT_CODE_ON_ERROR", ) other_group.add_argument( "-s", "--stop-timeout", action="store", type=int, dest="stop_timeout", default=None, help="Number of seconds to wait for a simulated user to complete any executing task before exiting. Default is to terminate immediately. This parameter only needs to be specified for the master process when running Locust distributed.", env_var="LOCUST_STOP_TIMEOUT", ) user_classes_group = parser.add_argument_group("User classes") user_classes_group.add_argument( "user_classes", nargs="*", metavar="UserClass", help="Optionally specify which User classes that should be used (available User classes can be listed with -l or --list)", ) def get_parser(default_config_files=DEFAULT_CONFIG_FILES): # get a parser that is only able to parse the -f argument parser = get_empty_argument_parser(add_help=True, default_config_files=default_config_files) # add all the other supported arguments setup_parser_arguments(parser) # fire event to provide a hook for locustscripts and plugins to add command line arguments locust.events.init_command_line_parser.fire(parser=parser) return parser def parse_options(args=None): parser = get_parser() parsed_opts = parser.parse_args(args=args) if parsed_opts.stats_history_enabled and (parsed_opts.csv_prefix is None): parser.error("'--csv-full-history' requires '--csv'.") return parsed_opts
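For reference, the two-stage flow described in parse_locustfile_option's docstring looks roughly like the sketch below when driven from an entry point. This is an illustrative example, not part of the module, and it omits the actual import of the locustfile (and registration of its extra options) that Locust performs between the two steps; the helper name `_example_two_stage_parse` is made up.
def _example_two_stage_parse(argv=None):
    # Stage 1: resolve only -f/--locustfile so the test script could be imported
    # (imported scripts and plugins may add options via init_command_line_parser).
    locustfile = parse_locustfile_option(args=argv)
    # Stage 2: parse the complete option set, including any plugin-added arguments.
    options = parse_options(args=argv)
    return locustfile, options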
# # Licensed to the Apache Software Foundation (ASF) under one or more # contributor license agreements. See the NOTICE file distributed with # this work for additional information regarding copyright ownership. # The ASF licenses this file to You under the Apache License, Version 2.0 # (the "License"); you may not use this file except in compliance with # the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # import os import shutil import sys from threading import Lock from tempfile import NamedTemporaryFile from pyspark import accumulators from pyspark.accumulators import Accumulator from pyspark.broadcast import Broadcast from pyspark.conf import SparkConf from pyspark.files import SparkFiles from pyspark.java_gateway import launch_gateway from pyspark.serializers import PickleSerializer, BatchedSerializer, UTF8Deserializer from pyspark.storagelevel import StorageLevel from pyspark.rdd import RDD from py4j.java_collections import ListConverter class SparkContext(object): """ Main entry point for Spark functionality. A SparkContext represents the connection to a Spark cluster, and can be used to create L{RDD}s and broadcast variables on that cluster. """ _gateway = None _jvm = None _writeToFile = None _next_accum_id = 0 _active_spark_context = None _lock = Lock() _python_includes = None # zip and egg files that need to be added to PYTHONPATH def __init__(self, master=None, appName=None, sparkHome=None, pyFiles=None, environment=None, batchSize=1024, serializer=PickleSerializer(), conf=None, gateway=None): """ Create a new SparkContext. At least the master and app name should be set, either through the named parameters here or through C{conf}. @param master: Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]). @param appName: A name for your job, to display on the cluster web UI. @param sparkHome: Location where Spark is installed on cluster nodes. @param pyFiles: Collection of .zip or .py files to send to the cluster and add to PYTHONPATH. These can be paths on the local file system or HDFS, HTTP, HTTPS, or FTP URLs. @param environment: A dictionary of environment variables to set on worker nodes. @param batchSize: The number of Python objects represented as a single Java object. Set 1 to disable batching or -1 to use an unlimited batch size. @param serializer: The serializer for RDDs. @param conf: A L{SparkConf} object setting Spark properties. @param gateway: Use an existing gateway and JVM, otherwise a new JVM will be instatiated. >>> from pyspark.context import SparkContext >>> sc = SparkContext('local', 'test') >>> sc2 = SparkContext('local', 'test2') # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): ... ValueError:... 
""" SparkContext._ensure_initialized(self, gateway=gateway) self.environment = environment or {} self._conf = conf or SparkConf(_jvm=self._jvm) self._batchSize = batchSize # -1 represents an unlimited batch size self._unbatched_serializer = serializer if batchSize == 1: self.serializer = self._unbatched_serializer else: self.serializer = BatchedSerializer(self._unbatched_serializer, batchSize) # Set any parameters passed directly to us on the conf if master: self._conf.setMaster(master) if appName: self._conf.setAppName(appName) if sparkHome: self._conf.setSparkHome(sparkHome) if environment: for key, value in environment.iteritems(): self._conf.setExecutorEnv(key, value) # Check that we have at least the required parameters if not self._conf.contains("spark.master"): raise Exception("A master URL must be set in your configuration") if not self._conf.contains("spark.app.name"): raise Exception("An application name must be set in your configuration") # Read back our properties from the conf in case we loaded some of them from # the classpath or an external config file self.master = self._conf.get("spark.master") self.appName = self._conf.get("spark.app.name") self.sparkHome = self._conf.get("spark.home", None) for (k, v) in self._conf.getAll(): if k.startswith("spark.executorEnv."): varName = k[len("spark.executorEnv."):] self.environment[varName] = v # Create the Java SparkContext through Py4J self._jsc = self._initialize_context(self._conf._jconf) # Create a single Accumulator in Java that we'll send all our updates through; # they will be passed back to us through a TCP server self._accumulatorServer = accumulators._start_update_server() (host, port) = self._accumulatorServer.server_address self._javaAccumulator = self._jsc.accumulator( self._jvm.java.util.ArrayList(), self._jvm.PythonAccumulatorParam(host, port)) self.pythonExec = os.environ.get("PYSPARK_PYTHON", 'python') # Broadcast's __reduce__ method stores Broadcast instances here. # This allows other code to determine which Broadcast instances have # been pickled, so it can determine which Java broadcast objects to # send. self._pickled_broadcast_vars = set() SparkFiles._sc = self root_dir = SparkFiles.getRootDirectory() sys.path.append(root_dir) # Deploy any code dependencies specified in the constructor self._python_includes = list() for path in (pyFiles or []): self.addPyFile(path) # Create a temporary directory inside spark.local.dir: local_dir = self._jvm.org.apache.spark.util.Utils.getLocalDir(self._jsc.sc().conf()) self._temp_dir = \ self._jvm.org.apache.spark.util.Utils.createTempDir(local_dir).getAbsolutePath() # Initialize SparkContext in function to allow subclass specific initialization def _initialize_context(self, jconf): return self._jvm.JavaSparkContext(jconf) @classmethod def _ensure_initialized(cls, instance=None, gateway=None): with SparkContext._lock: if not SparkContext._gateway: SparkContext._gateway = gateway or launch_gateway() SparkContext._jvm = SparkContext._gateway.jvm SparkContext._writeToFile = SparkContext._jvm.PythonRDD.writeToFile if instance: if SparkContext._active_spark_context and SparkContext._active_spark_context != instance: raise ValueError("Cannot run multiple SparkContexts at once") else: SparkContext._active_spark_context = instance @classmethod def setSystemProperty(cls, key, value): """ Set a Java system property, such as spark.executor.memory. This must must be invoked before instantiating SparkContext. 
""" SparkContext._ensure_initialized() SparkContext._jvm.java.lang.System.setProperty(key, value) @property def defaultParallelism(self): """ Default level of parallelism to use when not given by user (e.g. for reduce tasks) """ return self._jsc.sc().defaultParallelism() def __del__(self): self.stop() def stop(self): """ Shut down the SparkContext. """ if self._jsc: self._jsc.stop() self._jsc = None if self._accumulatorServer: self._accumulatorServer.shutdown() self._accumulatorServer = None with SparkContext._lock: SparkContext._active_spark_context = None def parallelize(self, c, numSlices=None): """ Distribute a local Python collection to form an RDD. >>> sc.parallelize(range(5), 5).glom().collect() [[0], [1], [2], [3], [4]] """ numSlices = numSlices or self.defaultParallelism # Calling the Java parallelize() method with an ArrayList is too slow, # because it sends O(n) Py4J commands. As an alternative, serialized # objects are written to a file and loaded through textFile(). tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir) # Make sure we distribute data evenly if it's smaller than self.batchSize if "__len__" not in dir(c): c = list(c) # Make it a list so we can compute its length batchSize = min(len(c) // numSlices, self._batchSize) if batchSize > 1: serializer = BatchedSerializer(self._unbatched_serializer, batchSize) else: serializer = self._unbatched_serializer serializer.dump_stream(c, tempFile) tempFile.close() readRDDFromFile = self._jvm.PythonRDD.readRDDFromFile jrdd = readRDDFromFile(self._jsc, tempFile.name, numSlices) return RDD(jrdd, self, serializer) def textFile(self, name, minSplits=None): """ Read a text file from HDFS, a local file system (available on all nodes), or any Hadoop-supported file system URI, and return it as an RDD of Strings. """ minSplits = minSplits or min(self.defaultParallelism, 2) return RDD(self._jsc.textFile(name, minSplits), self, UTF8Deserializer()) def _checkpointFile(self, name, input_deserializer): jrdd = self._jsc.checkpointFile(name) return RDD(jrdd, self, input_deserializer) def union(self, rdds): """ Build the union of a list of RDDs. This supports unions() of RDDs with different serialized formats, although this forces them to be reserialized using the default serializer: >>> path = os.path.join(tempdir, "union-text.txt") >>> with open(path, "w") as testFile: ... testFile.write("Hello") >>> textFile = sc.textFile(path) >>> textFile.collect() [u'Hello'] >>> parallelized = sc.parallelize(["World!"]) >>> sorted(sc.union([textFile, parallelized]).collect()) [u'Hello', 'World!'] """ first_jrdd_deserializer = rdds[0]._jrdd_deserializer if any(x._jrdd_deserializer != first_jrdd_deserializer for x in rdds): rdds = [x._reserialize() for x in rdds] first = rdds[0]._jrdd rest = [x._jrdd for x in rdds[1:]] rest = ListConverter().convert(rest, self._gateway._gateway_client) return RDD(self._jsc.union(first, rest), self, rdds[0]._jrdd_deserializer) def broadcast(self, value): """ Broadcast a read-only variable to the cluster, returning a L{Broadcast<pyspark.broadcast.Broadcast>} object for reading it in distributed functions. The variable will be sent to each cluster only once. 
""" pickleSer = PickleSerializer() pickled = pickleSer.dumps(value) jbroadcast = self._jsc.broadcast(bytearray(pickled)) return Broadcast(jbroadcast.id(), value, jbroadcast, self._pickled_broadcast_vars) def accumulator(self, value, accum_param=None): """ Create an L{Accumulator} with the given initial value, using a given L{AccumulatorParam} helper object to define how to add values of the data type if provided. Default AccumulatorParams are used for integers and floating-point numbers if you do not provide one. For other types, a custom AccumulatorParam can be used. """ if accum_param is None: if isinstance(value, int): accum_param = accumulators.INT_ACCUMULATOR_PARAM elif isinstance(value, float): accum_param = accumulators.FLOAT_ACCUMULATOR_PARAM elif isinstance(value, complex): accum_param = accumulators.COMPLEX_ACCUMULATOR_PARAM else: raise Exception("No default accumulator param for type %s" % type(value)) SparkContext._next_accum_id += 1 return Accumulator(SparkContext._next_accum_id - 1, value, accum_param) def addFile(self, path): """ Add a file to be downloaded with this Spark job on every node. The C{path} passed can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs, use L{SparkFiles.get(path)<pyspark.files.SparkFiles.get>} to find its download location. >>> from pyspark import SparkFiles >>> path = os.path.join(tempdir, "test.txt") >>> with open(path, "w") as testFile: ... testFile.write("100") >>> sc.addFile(path) >>> def func(iterator): ... with open(SparkFiles.get("test.txt")) as testFile: ... fileVal = int(testFile.readline()) ... return [x * 100 for x in iterator] >>> sc.parallelize([1, 2, 3, 4]).mapPartitions(func).collect() [100, 200, 300, 400] """ self._jsc.sc().addFile(path) def clearFiles(self): """ Clear the job's list of files added by L{addFile} or L{addPyFile} so that they do not get downloaded to any new nodes. """ # TODO: remove added .py or .zip files from the PYTHONPATH? self._jsc.sc().clearFiles() def addPyFile(self, path): """ Add a .py or .zip dependency for all tasks to be executed on this SparkContext in the future. The C{path} passed can be either a local file, a file in HDFS (or other Hadoop-supported filesystems), or an HTTP, HTTPS or FTP URI. """ self.addFile(path) (dirname, filename) = os.path.split(path) # dirname may be directory or HDFS/S3 prefix if filename.endswith('.zip') or filename.endswith('.ZIP') or filename.endswith('.egg'): self._python_includes.append(filename) sys.path.append(os.path.join(SparkFiles.getRootDirectory(), filename)) # for tests in local mode def setCheckpointDir(self, dirName): """ Set the directory under which RDDs are going to be checkpointed. The directory must be a HDFS path if running on a cluster. """ self._jsc.sc().setCheckpointDir(dirName) def _getJavaStorageLevel(self, storageLevel): """ Returns a Java StorageLevel based on a pyspark.StorageLevel. 
""" if not isinstance(storageLevel, StorageLevel): raise Exception("storageLevel must be of type pyspark.StorageLevel") newStorageLevel = self._jvm.org.apache.spark.storage.StorageLevel return newStorageLevel(storageLevel.useDisk, storageLevel.useMemory, storageLevel.deserialized, storageLevel.replication) def _test(): import atexit import doctest import tempfile globs = globals().copy() globs['sc'] = SparkContext('local[4]', 'PythonTest', batchSize=2) globs['tempdir'] = tempfile.mkdtemp() atexit.register(lambda: shutil.rmtree(globs['tempdir'])) (failure_count, test_count) = doctest.testmod(globs=globs) globs['sc'].stop() if failure_count: exit(-1) if __name__ == "__main__": _test()
"""Xbox Media Source Implementation.""" from dataclasses import dataclass from typing import List, Tuple from pydantic.error_wrappers import ValidationError # pylint: disable=no-name-in-module from xbox.webapi.api.client import XboxLiveClient from xbox.webapi.api.provider.catalog.models import FieldsTemplate, Image from xbox.webapi.api.provider.gameclips.models import GameclipsResponse from xbox.webapi.api.provider.screenshots.models import ScreenshotResponse from xbox.webapi.api.provider.smartglass.models import InstalledPackage from homeassistant.components.media_player.const import ( MEDIA_CLASS_DIRECTORY, MEDIA_CLASS_GAME, MEDIA_CLASS_IMAGE, MEDIA_CLASS_VIDEO, ) from homeassistant.components.media_source.const import MEDIA_MIME_TYPES from homeassistant.components.media_source.models import ( BrowseMediaSource, MediaSource, MediaSourceItem, PlayMedia, ) from homeassistant.core import callback from homeassistant.helpers.typing import HomeAssistantType from homeassistant.util import dt as dt_util from .browse_media import _find_media_image from .const import DOMAIN MIME_TYPE_MAP = { "gameclips": "video/mp4", "screenshots": "image/png", } MEDIA_CLASS_MAP = { "gameclips": MEDIA_CLASS_VIDEO, "screenshots": MEDIA_CLASS_IMAGE, } async def async_get_media_source(hass: HomeAssistantType): """Set up Xbox media source.""" entry = hass.config_entries.async_entries(DOMAIN)[0] client = hass.data[DOMAIN][entry.entry_id]["client"] return XboxSource(hass, client) @callback def async_parse_identifier( item: MediaSourceItem, ) -> Tuple[str, str, str]: """Parse identifier.""" identifier = item.identifier or "" start = ["", "", ""] items = identifier.lstrip("/").split("~~", 2) return tuple(items + start[len(items) :]) @dataclass class XboxMediaItem: """Represents gameclip/screenshot media.""" caption: str thumbnail: str uri: str media_class: str class XboxSource(MediaSource): """Provide Xbox screenshots and gameclips as media sources.""" name: str = "Xbox Game Media" def __init__(self, hass: HomeAssistantType, client: XboxLiveClient): """Initialize Xbox source.""" super().__init__(DOMAIN) self.hass: HomeAssistantType = hass self.client: XboxLiveClient = client async def async_resolve_media(self, item: MediaSourceItem) -> PlayMedia: """Resolve media to a url.""" _, category, url = async_parse_identifier(item) kind = category.split("#", 1)[1] return PlayMedia(url, MIME_TYPE_MAP[kind]) async def async_browse_media( self, item: MediaSourceItem, media_types: Tuple[str] = MEDIA_MIME_TYPES ) -> BrowseMediaSource: """Return media.""" title, category, _ = async_parse_identifier(item) if not title: return await self._build_game_library() if not category: return _build_categories(title) return await self._build_media_items(title, category) async def _build_game_library(self): """Display installed games across all consoles.""" apps = await self.client.smartglass.get_installed_apps() games = { game.one_store_product_id: game for game in apps.result if game.is_game and game.title_id } app_details = await self.client.catalog.get_products( games.keys(), FieldsTemplate.BROWSE, ) images = { prod.product_id: prod.localized_properties[0].images for prod in app_details.products } return BrowseMediaSource( domain=DOMAIN, identifier="", media_class=MEDIA_CLASS_DIRECTORY, media_content_type="", title="Xbox Game Media", can_play=False, can_expand=True, children=[_build_game_item(game, images) for game in games.values()], children_media_class=MEDIA_CLASS_GAME, ) async def _build_media_items(self, title, category): """Fetch requested 
gameclip/screenshot media.""" title_id, _, thumbnail = title.split("#", 2) owner, kind = category.split("#", 1) items: List[XboxMediaItem] = [] try: if kind == "gameclips": if owner == "my": response: GameclipsResponse = ( await self.client.gameclips.get_recent_clips_by_xuid( self.client.xuid, title_id ) ) elif owner == "community": response: GameclipsResponse = await self.client.gameclips.get_recent_community_clips_by_title_id( title_id ) else: return None items = [ XboxMediaItem( item.user_caption or dt_util.as_local( dt_util.parse_datetime(item.date_recorded) ).strftime("%b. %d, %Y %I:%M %p"), item.thumbnails[0].uri, item.game_clip_uris[0].uri, MEDIA_CLASS_VIDEO, ) for item in response.game_clips ] elif kind == "screenshots": if owner == "my": response: ScreenshotResponse = ( await self.client.screenshots.get_recent_screenshots_by_xuid( self.client.xuid, title_id ) ) elif owner == "community": response: ScreenshotResponse = await self.client.screenshots.get_recent_community_screenshots_by_title_id( title_id ) else: return None items = [ XboxMediaItem( item.user_caption or dt_util.as_local(item.date_taken).strftime( "%b. %d, %Y %I:%M%p" ), item.thumbnails[0].uri, item.screenshot_uris[0].uri, MEDIA_CLASS_IMAGE, ) for item in response.screenshots ] except ValidationError: # Unexpected API response pass return BrowseMediaSource( domain=DOMAIN, identifier=f"{title}~~{category}", media_class=MEDIA_CLASS_DIRECTORY, media_content_type="", title=f"{owner.title()} {kind.title()}", can_play=False, can_expand=True, children=[_build_media_item(title, category, item) for item in items], children_media_class=MEDIA_CLASS_MAP[kind], thumbnail=thumbnail, ) def _build_game_item(item: InstalledPackage, images: List[Image]): """Build individual game.""" thumbnail = "" image = _find_media_image(images.get(item.one_store_product_id, [])) if image is not None: thumbnail = image.uri if thumbnail[0] == "/": thumbnail = f"https:{thumbnail}" return BrowseMediaSource( domain=DOMAIN, identifier=f"{item.title_id}#{item.name}#{thumbnail}", media_class=MEDIA_CLASS_GAME, media_content_type="", title=item.name, can_play=False, can_expand=True, children_media_class=MEDIA_CLASS_DIRECTORY, thumbnail=thumbnail, ) def _build_categories(title): """Build base categories for Xbox media.""" _, name, thumbnail = title.split("#", 2) base = BrowseMediaSource( domain=DOMAIN, identifier=f"{title}", media_class=MEDIA_CLASS_GAME, media_content_type="", title=name, can_play=False, can_expand=True, children=[], children_media_class=MEDIA_CLASS_DIRECTORY, thumbnail=thumbnail, ) owners = ["my", "community"] kinds = ["gameclips", "screenshots"] for owner in owners: for kind in kinds: base.children.append( BrowseMediaSource( domain=DOMAIN, identifier=f"{title}~~{owner}#{kind}", media_class=MEDIA_CLASS_DIRECTORY, media_content_type="", title=f"{owner.title()} {kind.title()}", can_play=False, can_expand=True, children_media_class=MEDIA_CLASS_MAP[kind], ) ) return base def _build_media_item(title: str, category: str, item: XboxMediaItem): """Build individual media item.""" kind = category.split("#", 1)[1] return BrowseMediaSource( domain=DOMAIN, identifier=f"{title}~~{category}~~{item.uri}", media_class=item.media_class, media_content_type=MIME_TYPE_MAP[kind], title=item.caption, can_play=True, can_expand=False, thumbnail=item.thumbnail, )
import logging from decimal import Decimal as D from django.conf import settings from django.contrib import messages from django.contrib.auth.models import AnonymousUser from django.http import HttpResponse, HttpResponseBadRequest from django.shortcuts import get_object_or_404, redirect from django.urls import reverse from django.utils.http import urlencode from django.utils.translation import gettext_lazy as _ from django.views.generic import RedirectView, View from oscar.apps.payment.exceptions import UnableToTakePayment from oscar.apps.shipping.methods import FixedPrice, NoShippingRequired from oscar.core.exceptions import ModuleNotFoundError from oscar.core.loading import get_class, get_model from paypal.exceptions import PayPalError from paypal.express.exceptions import ( EmptyBasketException, InvalidBasket, MissingShippingAddressException, MissingShippingMethodException) from paypal.express.facade import confirm_transaction, fetch_transaction_details, get_paypal_url from paypal.express.gateway import buyer_pays_on_paypal # Load views dynamically PaymentDetailsView = get_class('checkout.views', 'PaymentDetailsView') CheckoutSessionMixin = get_class('checkout.session', 'CheckoutSessionMixin') ShippingAddress = get_model('order', 'ShippingAddress') Country = get_model('address', 'Country') Basket = get_model('basket', 'Basket') Repository = get_class('shipping.repository', 'Repository') Selector = get_class('partner.strategy', 'Selector') Source = get_model('payment', 'Source') SourceType = get_model('payment', 'SourceType') try: Applicator = get_class('offer.applicator', 'Applicator') except ModuleNotFoundError: # fallback for django-oscar<=1.1 Applicator = get_class('offer.utils', 'Applicator') logger = logging.getLogger('paypal.express') class RedirectView(CheckoutSessionMixin, RedirectView): """ Initiate the transaction with Paypal and redirect the user to PayPal's Express Checkout to perform the transaction. """ permanent = False # Setting to distinguish if the site has already collected a shipping # address. This is False when redirecting to PayPal straight from the # basket page but True when redirecting from checkout. as_payment_method = False def get_redirect_url(self, **kwargs): try: basket = self.build_submission()['basket'] url = self._get_redirect_url(basket, **kwargs) except PayPalError as ppe: messages.error(self.request, str(ppe)) if self.as_payment_method: url = reverse('checkout:payment-details') else: url = reverse('basket:summary') return url except InvalidBasket as e: messages.warning(self.request, str(e)) return reverse('basket:summary') except EmptyBasketException: messages.error(self.request, _("Your basket is empty")) return reverse('basket:summary') except MissingShippingAddressException: messages.error( self.request, _("A shipping address must be specified")) return reverse('checkout:shipping-address') except MissingShippingMethodException: messages.error( self.request, _("A shipping method must be specified")) return reverse('checkout:shipping-method') else: # Transaction successfully registered with PayPal. Now freeze the # basket so it can't be edited while the customer is on the PayPal # site. 
basket.freeze() logger.info("Basket #%s - redirecting to %s", basket.id, url) return url def _get_redirect_url(self, basket, **kwargs): if basket.is_empty: raise EmptyBasketException() params = { 'basket': basket, 'shipping_methods': [] # setup a default empty list } # to support no_shipping user = self.request.user if self.as_payment_method: if basket.is_shipping_required(): # Only check for shipping details if required. shipping_addr = self.get_shipping_address(basket) if not shipping_addr: raise MissingShippingAddressException() shipping_method = self.get_shipping_method( basket, shipping_addr) if not shipping_method: raise MissingShippingMethodException() params['shipping_address'] = shipping_addr params['shipping_method'] = shipping_method params['shipping_methods'] = [] else: # Maik doubts that this code ever worked. Assigning # shipping method instances to Paypal params # isn't going to work, is it? shipping_methods = Repository().get_shipping_methods( user=user, basket=basket, request=self.request) params['shipping_methods'] = shipping_methods if settings.DEBUG: # Determine the localserver's hostname to use when # in testing mode params['host'] = self.request.META['HTTP_HOST'] if user.is_authenticated: params['user'] = user params['paypal_params'] = self._get_paypal_params() return get_paypal_url(**params) def _get_paypal_params(self): """ Return any additional PayPal parameters """ return {} class CancelResponseView(RedirectView): permanent = False def get(self, request, *args, **kwargs): basket = get_object_or_404(Basket, id=kwargs['basket_id'], status=Basket.FROZEN) basket.thaw() logger.info("Payment cancelled (token %s) - basket #%s thawed", request.GET.get('token', '<no token>'), basket.id) return super(CancelResponseView, self).get(request, *args, **kwargs) def get_redirect_url(self, **kwargs): messages.error(self.request, _("PayPal transaction cancelled")) return reverse('basket:summary') # Upgrading notes: when we drop support for Oscar 0.6, this class can be # refactored to pass variables around more explicitly (instead of assigning # things to self so they are accessible in a later method). class SuccessResponseView(PaymentDetailsView): template_name_preview = 'paypal/express/preview.html' preview = True error_message = _("A problem occurred communicating with PayPal - please try again later") @property def pre_conditions(self): return [] def get(self, request, *args, **kwargs): """ Fetch details about the successful transaction from PayPal. We use these details to show a preview of the order with a 'submit' button to place it. The preview step can be skipped with `PAYPAL_BUYER_PAYS_ON_PAYPAL=True` inside settings. 
""" try: self.payer_id = request.GET['PayerID'] self.token = request.GET['token'] except KeyError: # Manipulation - redirect to basket page with warning message logger.warning("Missing GET params on success response page") messages.error(self.request, _("Unable to determine PayPal transaction details")) return redirect('basket:summary') try: self.txn = fetch_transaction_details(self.token) except PayPalError as e: logger.warning("Unable to fetch transaction details for token %s: %s", self.token, e) messages.error(self.request, self.error_message) return redirect('basket:summary') # Reload frozen basket which is specified in the URL kwargs['basket'] = self.load_frozen_basket(kwargs['basket_id']) if not kwargs['basket']: logger.warning("Unable to load frozen basket with ID %s", kwargs['basket_id']) messages.error(self.request, _("No basket was found that corresponds to your PayPal transaction")) return redirect('basket:summary') if buyer_pays_on_paypal(): return self.submit(**self.build_submission(basket=kwargs['basket'])) logger.info( "Basket #%s - showing preview with payer ID %s and token %s", kwargs['basket'].id, self.payer_id, self.token) return super(SuccessResponseView, self).get(request, *args, **kwargs) def load_frozen_basket(self, basket_id): # Lookup the frozen basket that this txn corresponds to try: basket = Basket.objects.get(id=basket_id, status=Basket.FROZEN) except Basket.DoesNotExist: return None # Assign strategy to basket instance if Selector: basket.strategy = Selector().strategy(self.request) # Re-apply any offers Applicator().apply(basket, self.request.user, request=self.request) return basket def get_context_data(self, **kwargs): ctx = super(SuccessResponseView, self).get_context_data(**kwargs) if not hasattr(self, 'payer_id'): return ctx # This context generation only runs when in preview mode ctx.update({ 'payer_id': self.payer_id, 'token': self.token, 'paypal_user_email': self.txn.value('EMAIL'), 'paypal_amount': D(self.txn.value('AMT')), }) return ctx def post(self, request, *args, **kwargs): """ Place an order. We fetch the txn details again and then proceed with oscar's standard payment details view for placing the order. 
""" if buyer_pays_on_paypal(): return HttpResponseBadRequest() # we don't expect any user here if we let users buy on PayPal try: self.payer_id = request.POST['payer_id'] self.token = request.POST['token'] except KeyError: # Probably suspicious manipulation if we get here messages.error(self.request, self.error_message) return redirect('basket:summary') try: self.txn = fetch_transaction_details(self.token) except PayPalError: # Unable to fetch txn details from PayPal - we have to bail out messages.error(self.request, self.error_message) return redirect('basket:summary') # Reload frozen basket which is specified in the URL basket = self.load_frozen_basket(kwargs['basket_id']) if not basket: messages.error(self.request, self.error_message) return redirect('basket:summary') submission = self.build_submission(basket=basket) return self.submit(**submission) def build_submission(self, **kwargs): submission = super( SuccessResponseView, self).build_submission(**kwargs) # Pass the user email so it can be stored with the order submission['order_kwargs']['guest_email'] = self.txn.value('EMAIL') # Pass PP params submission['payment_kwargs']['payer_id'] = self.payer_id submission['payment_kwargs']['token'] = self.token submission['payment_kwargs']['txn'] = self.txn return submission def handle_payment(self, order_number, total, **kwargs): """ Complete payment with PayPal - this calls the 'DoExpressCheckout' method to capture the money from the initial transaction. """ try: confirm_txn = confirm_transaction( kwargs['payer_id'], kwargs['token'], kwargs['txn'].amount, kwargs['txn'].currency) except PayPalError: raise UnableToTakePayment() if not confirm_txn.is_successful: raise UnableToTakePayment() # Record payment source and event source_type, is_created = SourceType.objects.get_or_create( name='PayPal') source = Source(source_type=source_type, currency=confirm_txn.currency, amount_allocated=confirm_txn.amount, amount_debited=confirm_txn.amount, reference=confirm_txn.token) self.add_payment_source(source) self.add_payment_event('Settled', confirm_txn.amount, reference=confirm_txn.correlation_id) def get_shipping_address(self, basket): """ Return a created shipping address instance, created using the data returned by PayPal. 
""" # Determine names - PayPal uses a single field ship_to_name = self.txn.value('PAYMENTREQUEST_0_SHIPTONAME') if ship_to_name is None: return None first_name = last_name = '' parts = ship_to_name.split() if len(parts) == 1: last_name = ship_to_name elif len(parts) > 1: first_name = parts[0] last_name = " ".join(parts[1:]) return ShippingAddress( first_name=first_name, last_name=last_name, line1=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET'), line2=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTREET2', default=""), line4=self.txn.value('PAYMENTREQUEST_0_SHIPTOCITY', default=""), state=self.txn.value('PAYMENTREQUEST_0_SHIPTOSTATE', default=""), postcode=self.txn.value('PAYMENTREQUEST_0_SHIPTOZIP', default=""), country=Country.objects.get(iso_3166_1_a2=self.txn.value('PAYMENTREQUEST_0_SHIPTOCOUNTRYCODE')), phone_number=self.txn.value('PAYMENTREQUEST_0_SHIPTOPHONENUM', default=""), ) def _get_shipping_method_by_name(self, name, basket, shipping_address=None): methods = Repository().get_shipping_methods( basket=basket, user=self.request.user, shipping_addr=shipping_address, request=self.request) for method in methods: if method.name == name: return method def get_shipping_method(self, basket, shipping_address=None, **kwargs): """ Return the shipping method used """ if not basket.is_shipping_required(): return NoShippingRequired() # Instantiate a new FixedPrice shipping method instance charge_incl_tax = D(self.txn.value('PAYMENTREQUEST_0_SHIPPINGAMT')) # Assume no tax for now charge_excl_tax = charge_incl_tax name = self.txn.value('SHIPPINGOPTIONNAME') session_method = super(SuccessResponseView, self).get_shipping_method( basket, shipping_address, **kwargs) if not session_method or (name and name != session_method.name): if name: method = self._get_shipping_method_by_name(name, basket, shipping_address) else: method = None if not method: method = FixedPrice(charge_excl_tax, charge_incl_tax) if session_method: method.name = session_method.name method.code = session_method.code else: method = session_method return method class ShippingOptionsView(View): def get(self, request, *args, **kwargs): """ We use the shipping address given to use by PayPal to determine the available shipping method """ # Basket ID is passed within the URL path. We need to do this as some # shipping options depend on the user and basket contents. PayPal do # pass back details of the basket contents but it would be royal pain to # reconstitute the basket based on those - easier to just to piggy-back # the basket ID in the callback URL. basket = get_object_or_404(Basket, id=kwargs['basket_id']) user = basket.owner if not user: user = AnonymousUser() # Create a shipping address instance using the data passed back country_code = self.request.GET.get( 'SHIPTOCOUNTRY', None) try: country = Country.objects.get(iso_3166_1_a2=country_code) except Country.DoesNotExist: country = Country() shipping_address = ShippingAddress( line1=self.request.GET.get('SHIPTOSTREET', ''), line2=self.request.GET.get('SHIPTOSTREET2', ''), line4=self.request.GET.get('SHIPTOCITY', ''), state=self.request.GET.get('SHIPTOSTATE', ''), postcode=self.request.GET.get('SHIPTOZIP', ''), country=country ) methods = Repository().get_shipping_methods( basket=basket, shipping_addr=shipping_address, request=self.request, user=user) return self.render_to_response(methods, basket) def post(self, request, *args, **kwargs): """ We use the shipping address given to use by PayPal to determine the available shipping method """ # Basket ID is passed within the URL path. 
We need to do this as some # shipping options depend on the user and basket contents. PayPal do # pass back details of the basket contents but it would be royal pain to # reconstitute the basket based on those - easier to just to piggy-back # the basket ID in the callback URL. basket = get_object_or_404(Basket, id=kwargs['basket_id']) user = basket.owner if not user: user = AnonymousUser() # Create a shipping address instance using the data passed back country_code = self.request.POST.get( 'SHIPTOCOUNTRY', None) try: country = Country.objects.get(iso_3166_1_a2=country_code) except Country.DoesNotExist: country = Country() shipping_address = ShippingAddress( line1=self.request.POST.get('SHIPTOSTREET', ''), line2=self.request.POST.get('SHIPTOSTREET2', ''), line4=self.request.POST.get('SHIPTOCITY', ''), state=self.request.POST.get('SHIPTOSTATE', ''), postcode=self.request.POST.get('SHIPTOZIP', ''), country=country ) methods = Repository().get_shipping_methods( basket=basket, shipping_addr=shipping_address, request=self.request, user=user) return self.render_to_response(methods, basket) def render_to_response(self, methods, basket): pairs = [ ('METHOD', 'CallbackResponse'), ('CALLBACKVERSION', '61.0'), ('CURRENCYCODE', self.request.POST.get('CURRENCYCODE', 'GBP')), ] if methods: for index, method in enumerate(methods): charge = method.calculate(basket).incl_tax pairs.append(('L_SHIPPINGOPTIONNAME%d' % index, str(method.name))) pairs.append(('L_SHIPPINGOPTIONLABEL%d' % index, str(method.description))) pairs.append(('L_SHIPPINGOPTIONAMOUNT%d' % index, charge)) # For now, we assume tax and insurance to be zero pairs.append(('L_TAXAMT%d' % index, D('0.00'))) pairs.append(('L_INSURANCEAMT%d' % index, D('0.00'))) # We assume that the first returned method is the default one pairs.append(('L_SHIPPINGOPTIONISDEFAULT%d' % index, 1 if index == 0 else 0)) else: # No shipping methods available - we flag this up to PayPal indicating that we # do not ship to the shipping address. pairs.append(('NO_SHIPPING_OPTION_DETAILS', 1)) payload = urlencode(pairs) logger.debug("Basket #%s - returning postage costs payload = '%s'", basket.id, payload) return HttpResponse(payload)
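# ---------------------------------------------------------------------------
# Wiring sketch (not part of the module above): one plausible urls.py for the
# Express Checkout views. The package ships its own URL configuration; the
# route strings and names used here are illustrative assumptions only.
from django.urls import path

from paypal.express.views import (
    CancelResponseView, RedirectView, SuccessResponseView)

urlpatterns = [
    # Send the customer to PayPal, freezing the basket on success
    path('checkout/paypal/redirect/',
         RedirectView.as_view(), name='paypal-redirect'),
    # PayPal returns the buyer here with ?token=...&PayerID=...
    path('checkout/paypal/preview/<int:basket_id>/',
         SuccessResponseView.as_view(), name='paypal-success-response'),
    # Buyer cancelled on the PayPal site; thaw the frozen basket
    path('checkout/paypal/cancel/<int:basket_id>/',
         CancelResponseView.as_view(), name='paypal-cancel-response'),
]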
from datetime import datetime, timedelta import socket from twisted.web import resource, static from twisted.application.service import IServiceCollection from scrapy.utils.misc import load_object from .interfaces import IPoller, IEggStorage, ISpiderScheduler from six.moves.urllib.parse import urlparse class Root(resource.Resource): def __init__(self, config, app): resource.Resource.__init__(self) self.debug = config.getboolean('debug', False) self.runner = config.get('runner') logsdir = config.get('logs_dir') itemsdir = config.get('items_dir') local_items = itemsdir and (urlparse(itemsdir).scheme.lower() in ['', 'file']) self.app = app self.nodename = config.get('node_name', socket.gethostname()) self.putChild(b'', Home(self, local_items)) if logsdir: self.putChild(b'logs', static.File(logsdir.encode('ascii', 'ignore'), 'text/plain')) if local_items: self.putChild(b'items', static.File(itemsdir, 'text/plain')) self.putChild(b'jobs', Jobs(self, local_items)) services = config.items('services', ()) for servName, servClsName in services: servCls = load_object(servClsName) self.putChild(servName.encode('utf-8'), servCls(self)) self.update_projects() def update_projects(self): self.poller.update_projects() self.scheduler.update_projects() @property def launcher(self): app = IServiceCollection(self.app, self.app) return app.getServiceNamed('launcher') @property def scheduler(self): return self.app.getComponent(ISpiderScheduler) @property def eggstorage(self): return self.app.getComponent(IEggStorage) @property def poller(self): return self.app.getComponent(IPoller) class Home(resource.Resource): def __init__(self, root, local_items): resource.Resource.__init__(self) self.root = root self.local_items = local_items def render_GET(self, txrequest): vars = { 'projects': ', '.join(self.root.scheduler.list_projects()) } s = """ <html> <head><title>Scrapyd</title></head> <body> <h1>Scrapyd</h1> <p>Available projects: <b>%(projects)s</b></p> <ul> <li><a href="/jobs">Jobs</a></li> """ % vars if self.local_items: s += '<li><a href="/items/">Items</a></li>' s += """ <li><a href="/logs/">Logs</a></li> <li><a href="http://scrapyd.readthedocs.org/en/latest/">Documentation</a></li> </ul> <h2>How to schedule a spider?</h2> <p>To schedule a spider you need to use the API (this web UI is only for monitoring)</p> <p>Example using <a href="http://curl.haxx.se/">curl</a>:</p> <p><code>curl http://localhost:6800/schedule.json -d project=default -d spider=somespider</code></p> <p>For more information about the API, see the <a href="http://scrapyd.readthedocs.org/en/latest/">Scrapyd documentation</a></p> </body> </html> """ % vars return s.encode('utf-8') def microsec_trunc(timelike): if hasattr(timelike, 'microsecond'): ms = timelike.microsecond else: ms = timelike.microseconds return timelike - timedelta(microseconds=ms) class Jobs(resource.Resource): def __init__(self, root, local_items): resource.Resource.__init__(self) self.root = root self.local_items = local_items cancel_button = """ <form method="post" action="/cancel.json"> <input type="hidden" name="project" value="{project}"/> <input type="hidden" name="job" value="{jobid}"/> <input type="submit" style="float: left;" value="Cancel"/> </form> """.format header_cols = [ 'Project', 'Spider', 'Job', 'PID', 'Start', 'Runtime', 'Finish', 'Log', 'Items', 'Cancel', ] def gen_css(self): css = [ '#jobs>thead td {text-align: center; font-weight: bold}', '#jobs>tbody>tr:first-child {background-color: #eee}', ] if not self.local_items: col_idx = 
self.header_cols.index('Items') + 1 css.append('#jobs>*>tr>*:nth-child(%d) {display: none}' % col_idx) if b'cancel.json' not in self.root.children: col_idx = self.header_cols.index('Cancel') + 1 css.append('#jobs>*>tr>*:nth-child(%d) {display: none}' % col_idx) return '\n'.join(css) def prep_row(self, cells): if not isinstance(cells, dict): assert len(cells) == len(self.header_cols) else: cells = [cells.get(k) for k in self.header_cols] cells = ['<td>%s</td>' % ('' if c is None else c) for c in cells] return '<tr>%s</tr>' % ''.join(cells) def prep_doc(self): return ( '<html>' '<head>' '<title>Scrapyd</title>' '<style type="text/css">' + self.gen_css() + '</style>' '</head>' '<body><h1>Jobs</h1>' '<p><a href="..">Go up</a></p>' + self.prep_table() + '</body>' '</html>' ) def prep_table(self): return ( '<table id="jobs" border="1">' '<thead>' + self.prep_row(self.header_cols) + '</thead>' '<tbody>' + '<tr><th colspan="%d">Pending</th></tr>' % len(self.header_cols) + self.prep_tab_pending() + '</tbody>' '<tbody>' + '<tr><th colspan="%d">Running</th></tr>' % len(self.header_cols) + self.prep_tab_running() + '</tbody>' '<tbody>' + '<tr><th colspan="%d">Finished</th></tr>' % len(self.header_cols) + self.prep_tab_finished() + '</tbody>' '</table>' ) def prep_tab_pending(self): return '\n'.join( self.prep_row(dict( Project=project, Spider=m['name'], Job=m['_job'], Cancel=self.cancel_button(project=project, jobid=m['_job']) )) for project, queue in self.root.poller.queues.items() for m in queue.list() ) def prep_tab_running(self): return '\n'.join( self.prep_row(dict( Project=p.project, Spider=p.spider, Job=p.job, PID=p.pid, Start=microsec_trunc(p.start_time), Runtime=microsec_trunc(datetime.now() - p.start_time), Log='<a href="/logs/%s/%s/%s.log">Log</a>' % (p.project, p.spider, p.job), Items='<a href="/items/%s/%s/%s.jl">Items</a>' % (p.project, p.spider, p.job), Cancel=self.cancel_button(project=p.project, jobid=p.job) )) for p in self.root.launcher.processes.values() ) def prep_tab_finished(self): return '\n'.join( self.prep_row(dict( Project=p.project, Spider=p.spider, Job=p.job, Start=microsec_trunc(p.start_time), Runtime=microsec_trunc(p.end_time - p.start_time), Finish=microsec_trunc(p.end_time), Log='<a href="/logs/%s/%s/%s.log">Log</a>' % (p.project, p.spider, p.job), Items='<a href="/items/%s/%s/%s.jl">Items</a>' % (p.project, p.spider, p.job), )) for p in self.root.launcher.finished ) def render(self, txrequest): doc = self.prep_doc() txrequest.setHeader('Content-Type', 'text/html; charset=utf-8') txrequest.setHeader('Content-Length', str(len(doc))) return doc.encode('utf-8')
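# ---------------------------------------------------------------------------
# Standalone check (not part of scrapyd) of the microsec_trunc() helper used
# by the Jobs table above: it strips sub-second precision from both datetime
# and timedelta values so start times and runtimes render cleanly. The helper
# is re-declared here only to keep the snippet self-contained.
from datetime import datetime, timedelta

def microsec_trunc(timelike):
    if hasattr(timelike, 'microsecond'):
        ms = timelike.microsecond       # datetime objects
    else:
        ms = timelike.microseconds      # timedelta objects
    return timelike - timedelta(microseconds=ms)

if __name__ == "__main__":
    start = datetime(2024, 1, 2, 3, 4, 5, 678901)
    runtime = timedelta(seconds=90, microseconds=123456)
    assert microsec_trunc(start) == datetime(2024, 1, 2, 3, 4, 5)
    assert microsec_trunc(runtime) == timedelta(seconds=90)
    print(microsec_trunc(start), microsec_trunc(runtime))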
# Copyright (c) 2015 Orange. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. from sqlalchemy import orm from sqlalchemy import sql from neutron.db.models import l3 from neutron.db import models_v2 from neutron.debug import debug_agent from neutron_lib.api.definitions import portbindings from neutron_lib.callbacks import events from neutron_lib.callbacks import registry from neutron_lib.callbacks import resources from neutron_lib import constants as const from neutron_lib.db import api as db_api from oslo_log import helpers as log_helpers from oslo_log import log as logging from networking_bagpipe.agent.bgpvpn import rpc_client from networking_bgpvpn.neutron.db import bgpvpn_db from networking_bgpvpn.neutron.services.common import utils from networking_bgpvpn.neutron.services.service_drivers.bagpipe \ import bagpipe_v2 as v2 LOG = logging.getLogger(__name__) @log_helpers.log_method_call @db_api.CONTEXT_READER def get_network_info_for_port(context, port_id, network_id): """Get MAC, IP and Gateway IP addresses informations for a specific port""" try: net_info = (context.session. query(models_v2.Port.mac_address, models_v2.IPAllocation.ip_address, models_v2.Subnet.cidr, models_v2.Subnet.gateway_ip). join(models_v2.IPAllocation, models_v2.IPAllocation.port_id == models_v2.Port.id). join(models_v2.Subnet, models_v2.IPAllocation.subnet_id == models_v2.Subnet.id). filter(models_v2.Subnet.ip_version == 4). filter(models_v2.Port.id == port_id).one()) (mac_address, ip_address, cidr, gateway_ip) = net_info except orm.exc.NoResultFound: return gateway_mac = ( context.session. query(models_v2.Port.mac_address). filter( models_v2.Port.network_id == network_id, (models_v2.Port.device_owner == const.DEVICE_OWNER_ROUTER_INTF) ). one_or_none() ) return {'mac_address': mac_address, 'ip_address': ip_address + cidr[cidr.index('/'):], 'gateway_ip': gateway_ip, 'gateway_mac': gateway_mac[0] if gateway_mac else None} @db_api.CONTEXT_READER def get_gateway_mac(context, network_id): gateway_mac = ( context.session. query(models_v2.Port.mac_address). filter( models_v2.Port.network_id == network_id, (models_v2.Port.device_owner == const.DEVICE_OWNER_ROUTER_INTF) ). one_or_none() ) return gateway_mac[0] if gateway_mac else None @db_api.CONTEXT_READER def get_network_ports(context, network_id): # NOTE(tmorin): currents callers don't look at detailed results # but only test if at least one result exist => can be optimized # by returning a count, rather than all port information return (context.session.query(models_v2.Port). filter(models_v2.Port.network_id == network_id, models_v2.Port.admin_state_up == sql.true()).all()) @db_api.CONTEXT_READER def get_router_ports(context, router_id): return ( context.session.query(models_v2.Port). filter( models_v2.Port.device_id == router_id, models_v2.Port.device_owner == const.DEVICE_OWNER_ROUTER_INTF ).all() ) @db_api.CONTEXT_READER def get_router_bgpvpn_assocs(context, router_id): return ( context.session.query(bgpvpn_db.BGPVPNRouterAssociation). 
filter( bgpvpn_db.BGPVPNRouterAssociation.router_id == router_id ).all() ) @db_api.CONTEXT_READER def get_network_bgpvpn_assocs(context, net_id): return ( context.session.query(bgpvpn_db.BGPVPNNetAssociation). filter( bgpvpn_db.BGPVPNNetAssociation.network_id == net_id ).all() ) @db_api.CONTEXT_READER def get_bgpvpns_of_router_assocs_by_network(context, net_id): return ( context.session.query(bgpvpn_db.BGPVPN). join(bgpvpn_db.BGPVPN.router_associations). join(bgpvpn_db.BGPVPNRouterAssociation.router). join(l3.Router.attached_ports). join(l3.RouterPort.port). filter( models_v2.Port.network_id == net_id ).all() ) @db_api.CONTEXT_READER def get_networks_for_router(context, router_id): ports = get_router_ports(context, router_id) if ports: return {port['network_id'] for port in ports} else: return [] def _log_callback_processing_exception(resource, event, trigger, kwargs, e): LOG.exception("Error during notification processing " "%(resource)s %(event)s, %(trigger)s, " "%(kwargs)s: %(exc)s", {'trigger': trigger, 'resource': resource, 'event': event, 'kwargs': kwargs, 'exc': e}) @registry.has_registry_receivers class BaGPipeBGPVPNDriver(v2.BaGPipeBGPVPNDriver): """BGPVPN Service Driver class for BaGPipe""" def __init__(self, service_plugin): super(BaGPipeBGPVPNDriver, self).__init__(service_plugin) self.agent_rpc = rpc_client.BGPVPNAgentNotifyApi() def _format_bgpvpn(self, context, bgpvpn, network_id): """JSON-format BGPVPN BGPVPN, network identifiers, and route targets. """ formatted_bgpvpn = {'id': bgpvpn['id'], 'network_id': network_id, 'gateway_mac': get_gateway_mac(context, network_id)} formatted_bgpvpn.update( self._format_bgpvpn_network_route_targets([bgpvpn])) return formatted_bgpvpn def _format_bgpvpn_network_route_targets(self, bgpvpns): """Format BGPVPN network informations (VPN type and route targets) [{ 'type': 'l3', 'route_targets': ['12345:1', '12345:2'], 'import_targets': ['12345:3'], 'export_targets': ['12345:4'] }, { 'type': 'l3', 'route_targets': ['12346:1'] }, { 'type': 'l2', 'route_targets': ['12347:1'] } ] to { 'l3vpn' : { 'import_rt': ['12345:1', '12345:2', '12345:3', '12346:1'], 'export_rt': ['12345:1', '12345:2', '12345:4', '12346:1'] }, 'l2vpn' : { 'import_rt': ['12347:1'], 'export_rt': ['12347:1'] } } """ bgpvpn_rts = {} for bgpvpn in bgpvpns: # Add necessary keys to BGP VPN route targets dictionary if bgpvpn['type'] + 'vpn' not in bgpvpn_rts: bgpvpn_rts.update( {bgpvpn['type'] + 'vpn': {'import_rt': [], 'export_rt': []}} ) if 'route_targets' in bgpvpn: bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += ( bgpvpn['route_targets'] ) bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += ( bgpvpn['route_targets'] ) if 'import_targets' in bgpvpn: bgpvpn_rts[bgpvpn['type'] + 'vpn']['import_rt'] += ( bgpvpn['import_targets'] ) if 'export_targets' in bgpvpn: bgpvpn_rts[bgpvpn['type'] + 'vpn']['export_rt'] += ( bgpvpn['export_targets'] ) for attribute in ('import_rt', 'export_rt'): if bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute]: bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute] = list( set(bgpvpn_rts[bgpvpn['type'] + 'vpn'][attribute])) return bgpvpn_rts def _bgpvpns_for_network(self, context, network_id): return ( self.bgpvpn_db.get_bgpvpns( context, filters={ 'networks': [network_id], }, ) or self.retrieve_bgpvpns_of_router_assocs_by_network(context, network_id) ) def _networks_for_bgpvpn(self, context, bgpvpn): networks = [] networks.extend(bgpvpn['networks']) for router_id in bgpvpn['routers']: networks.extend(get_networks_for_router(context, router_id)) return 
list(set(networks)) def _retrieve_bgpvpn_network_info_for_port(self, context, port): """Retrieve BGP VPN network informations for a specific port { 'network_id': <UUID>, 'mac_address': '00:00:de:ad:be:ef', 'ip_address': '10.0.0.2', 'gateway_ip': '10.0.0.1', 'gateway_mac': 'aa:bb:cc:dd:ee:ff', # if a router interface exists 'l3vpn' : { 'import_rt': ['12345:1', '12345:2', '12345:3'], 'export_rt': ['12345:1', '12345:2', '12345:4'] } } """ port_id = port['id'] network_id = port['network_id'] bgpvpn_network_info = {} bgpvpns = self._bgpvpns_for_network(context, network_id) # NOTE(tmorin): We currently need to send 'network_id', 'mac_address', # 'ip_address', 'gateway_ip' to the agent, even in the absence of # a BGPVPN bound to the port. If we don't this information will # lack on an update_bgpvpn RPC. When the agent will have the ability # to retrieve this info by itself, we'll change this method # to return {} if there is no bound bgpvpn. bgpvpn_rts = self._format_bgpvpn_network_route_targets(bgpvpns) LOG.debug("Port connected on BGPVPN network %s with route targets " "%s" % (network_id, bgpvpn_rts)) bgpvpn_network_info.update(bgpvpn_rts) LOG.debug("Getting port %s network details" % port_id) network_info = get_network_info_for_port(context, port_id, network_id) if not network_info: LOG.warning("No network information for net %s", network_id) return bgpvpn_network_info.update(network_info) return bgpvpn_network_info @db_api.CONTEXT_READER def retrieve_bgpvpns_of_router_assocs_by_network(self, context, network_id): return [self.bgpvpn_db._make_bgpvpn_dict(bgpvpn) for bgpvpn in get_bgpvpns_of_router_assocs_by_network(context, network_id)] def delete_bgpvpn_postcommit(self, context, bgpvpn): for net_id in self._networks_for_bgpvpn(context, bgpvpn): if get_network_ports(context, net_id): # Format BGPVPN before sending notification self.agent_rpc.delete_bgpvpn( context, self._format_bgpvpn(context, bgpvpn, net_id)) def update_bgpvpn_postcommit(self, context, old_bgpvpn, bgpvpn): super(BaGPipeBGPVPNDriver, self).update_bgpvpn_postcommit( context, old_bgpvpn, bgpvpn) (added_keys, removed_keys, changed_keys) = ( utils.get_bgpvpn_differences(bgpvpn, old_bgpvpn)) ATTRIBUTES_TO_IGNORE = set('name') moving_keys = added_keys | removed_keys | changed_keys if len(moving_keys ^ ATTRIBUTES_TO_IGNORE): for net_id in self._networks_for_bgpvpn(context, bgpvpn): if (get_network_ports(context, net_id)): self._update_bgpvpn_for_network(context, net_id, bgpvpn) def _update_bgpvpn_for_net_with_id(self, context, network_id, bgpvpn_id): if get_network_ports(context, network_id): bgpvpn = self.get_bgpvpn(context, bgpvpn_id) self._update_bgpvpn_for_network(context, network_id, bgpvpn) def _update_bgpvpn_for_network(self, context, net_id, bgpvpn): formated_bgpvpn = self._format_bgpvpn(context, bgpvpn, net_id) self.agent_rpc.update_bgpvpn(context, formated_bgpvpn) def create_net_assoc_postcommit(self, context, net_assoc): super(BaGPipeBGPVPNDriver, self).create_net_assoc_postcommit(context, net_assoc) self._update_bgpvpn_for_net_with_id(context, net_assoc['network_id'], net_assoc['bgpvpn_id']) def delete_net_assoc_postcommit(self, context, net_assoc): if get_network_ports(context, net_assoc['network_id']): bgpvpn = self.get_bgpvpn(context, net_assoc['bgpvpn_id']) formated_bgpvpn = self._format_bgpvpn(context, bgpvpn, net_assoc['network_id']) self.agent_rpc.delete_bgpvpn(context, formated_bgpvpn) def _ignore_port(self, context, port): if (port['device_owner'].startswith( const.DEVICE_OWNER_NETWORK_PREFIX) and not 
port['device_owner'] in (debug_agent.DEVICE_OWNER_COMPUTE_PROBE, debug_agent.DEVICE_OWNER_NETWORK_PROBE)): LOG.info("Port %s owner is network:*, we'll do nothing", port['id']) return True if v2.network_is_external(context, port['network_id']): LOG.info("Port %s is on an external network, we'll do nothing", port['id']) return True return False @log_helpers.log_method_call def notify_port_updated(self, context, port, original_port): if self._ignore_port(context, port): return agent_host = port[portbindings.HOST_ID] port_bgpvpn_info = {'id': port['id'], 'network_id': port['network_id']} if (port['status'] == const.PORT_STATUS_ACTIVE and original_port['status'] != const.PORT_STATUS_ACTIVE): LOG.debug("notify_port_updated, port became ACTIVE") bgpvpn_network_info = ( self._retrieve_bgpvpn_network_info_for_port(context, port) ) if bgpvpn_network_info: port_bgpvpn_info.update(bgpvpn_network_info) self.agent_rpc.attach_port_on_bgpvpn(context, port_bgpvpn_info, agent_host) else: # currently not reached, because we need # _retrieve_bgpvpn_network_info_for_port to always # return network information, even in the absence # of any BGPVPN port bound. pass elif (port['status'] == const.PORT_STATUS_DOWN and original_port['status'] != const.PORT_STATUS_DOWN): LOG.debug("notify_port_updated, port became DOWN") self.agent_rpc.detach_port_from_bgpvpn(context, port_bgpvpn_info, agent_host) else: LOG.debug("new port status is %s, origin status was %s," " => no action", port['status'], original_port['status']) @log_helpers.log_method_call def notify_port_deleted(self, context, port): port_bgpvpn_info = {'id': port['id'], 'network_id': port['network_id']} if self._ignore_port(context, port): return self.agent_rpc.detach_port_from_bgpvpn(context, port_bgpvpn_info, port[portbindings.HOST_ID]) def create_router_assoc_postcommit(self, context, router_assoc): super(BaGPipeBGPVPNDriver, self).create_router_assoc_postcommit( context, router_assoc) for net_id in get_networks_for_router(context, router_assoc['router_id']): self._update_bgpvpn_for_net_with_id(context, net_id, router_assoc['bgpvpn_id']) def delete_router_assoc_postcommit(self, context, router_assoc): for net_id in get_networks_for_router(context, router_assoc['router_id']): net_assoc = {'network_id': net_id, 'bgpvpn_id': router_assoc['bgpvpn_id']} self.delete_net_assoc_postcommit(context, net_assoc) @log_helpers.log_method_call def notify_router_interface_created(self, context, router_id, net_id): super(BaGPipeBGPVPNDriver, self).notify_router_interface_created( context, router_id, net_id) net_assocs = get_network_bgpvpn_assocs(context, net_id) router_assocs = get_router_bgpvpn_assocs(context, router_id) # if this router_interface is on a network bound to a BGPVPN, # or if this router is bound to a BGPVPN, # then we need to send and update for this network, including # the gateway_mac if net_assocs or router_assocs: for bgpvpn in self._bgpvpns_for_network(context, net_id): self._update_bgpvpn_for_network(context, net_id, bgpvpn) for router_assoc in router_assocs: self._update_bgpvpn_for_net_with_id(context, net_id, router_assoc['bgpvpn_id']) @log_helpers.log_method_call def notify_router_interface_deleted(self, context, router_id, net_id): super(BaGPipeBGPVPNDriver, self).notify_router_interface_deleted( context, router_id, net_id) net_assocs = get_network_bgpvpn_assocs(context, net_id) router_assocs = get_router_bgpvpn_assocs(context, router_id) if net_assocs or router_assocs: for bgpvpn in self._bgpvpns_for_network(context, net_id): 
self._update_bgpvpn_for_network(context, net_id, bgpvpn) for router_assoc in router_assocs: net_assoc = {'network_id': net_id, 'bgpvpn_id': router_assoc['bgpvpn_id']} self.delete_net_assoc_postcommit(context, net_assoc) @registry.receives(resources.PORT, [events.AFTER_UPDATE]) @log_helpers.log_method_call def registry_port_updated(self, resource, event, trigger, payload): try: context = payload.context port = payload.latest_state original_port = payload.states[0] self.notify_port_updated(context, port, original_port) except Exception as e: _log_callback_processing_exception(resource, event, trigger, payload.metadata, e) @registry.receives(resources.PORT, [events.AFTER_DELETE]) @log_helpers.log_method_call def registry_port_deleted(self, resource, event, trigger, payload): try: context = payload.context port = payload.latest_state self.notify_port_deleted(context, port) except Exception as e: _log_callback_processing_exception(resource, event, trigger, payload.metadata, e) # contrary to mother class, no need to subscribe to router interface # before-delete, because after delete, we still can generate RPCs @registry.receives(resources.ROUTER_INTERFACE, [events.AFTER_DELETE]) @log_helpers.log_method_call def registry_router_interface_deleted(self, resource, event, trigger, payload=None): try: context = payload.context # for router_interface after_delete, in stable/newton, the # callback does not include the router_id directly, but we find # it in the port device_id router_id = payload.metadata.get('port')['device_id'] net_id = payload.metadata.get('port')['network_id'] self.notify_router_interface_deleted(context, router_id, net_id) except Exception as e: _log_callback_processing_exception(resource, event, trigger, payload.metadata, e)
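# ---------------------------------------------------------------------------
# Standalone sketch (not part of the driver) of the route-target aggregation
# documented in _format_bgpvpn_network_route_targets() above: per-BGPVPN
# route_targets/import_targets/export_targets collapse into one
# {'<type>vpn': {'import_rt': [...], 'export_rt': [...]}} mapping. Unlike the
# driver, this version returns sorted lists to make the output deterministic;
# the sample data mirrors the docstring example.
def format_route_targets(bgpvpns):
    rts = {}
    for bgpvpn in bgpvpns:
        entry = rts.setdefault(bgpvpn['type'] + 'vpn',
                               {'import_rt': set(), 'export_rt': set()})
        entry['import_rt'] |= set(bgpvpn.get('route_targets', []))
        entry['export_rt'] |= set(bgpvpn.get('route_targets', []))
        entry['import_rt'] |= set(bgpvpn.get('import_targets', []))
        entry['export_rt'] |= set(bgpvpn.get('export_targets', []))
    return {vpn: {k: sorted(v) for k, v in targets.items()}
            for vpn, targets in rts.items()}

if __name__ == "__main__":
    sample = [
        {'type': 'l3', 'route_targets': ['12345:1', '12345:2'],
         'import_targets': ['12345:3'], 'export_targets': ['12345:4']},
        {'type': 'l3', 'route_targets': ['12346:1']},
        {'type': 'l2', 'route_targets': ['12347:1']},
    ]
    result = format_route_targets(sample)
    assert result['l3vpn']['import_rt'] == ['12345:1', '12345:2',
                                            '12345:3', '12346:1']
    assert result['l2vpn'] == {'import_rt': ['12347:1'],
                               'export_rt': ['12347:1']}
    print(result)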
# -*- coding: utf-8 -*- from django.db import models, migrations from django.conf import settings class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('microdevices', '0001_initial'), ('cellsamples', '0001_initial'), ('compounds', '0001_initial'), ('auth', '0001_initial'), ('assays', '0001_initial'), ] operations = [ migrations.AddField( model_name='studymodel', name='organ', field=models.ForeignKey(to='microdevices.OrganModel', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='studymodel', name='study_configuration', field=models.ForeignKey(to='assays.StudyConfiguration', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='studyconfiguration', name='created_by', field=models.ForeignKey(related_name='studyconfiguration_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='studyconfiguration', name='modified_by', field=models.ForeignKey(related_name='studyconfiguration_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='studyconfiguration', name='signed_off_by', field=models.ForeignKey(related_name='studyconfiguration_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='readoutunit', name='created_by', field=models.ForeignKey(related_name='readoutunit_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='readoutunit', name='modified_by', field=models.ForeignKey(related_name='readoutunit_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='readoutunit', name='signed_off_by', field=models.ForeignKey(related_name='readoutunit_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='physicalunits', name='created_by', field=models.ForeignKey(related_name='physicalunits_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='physicalunits', name='modified_by', field=models.ForeignKey(related_name='physicalunits_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='physicalunits', name='signed_off_by', field=models.ForeignKey(related_name='physicalunits_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywelltype', name='created_by', field=models.ForeignKey(related_name='assaywelltype_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywelltype', name='modified_by', field=models.ForeignKey(related_name='assaywelltype_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywelltype', name='signed_off_by', field=models.ForeignKey(related_name='assaywelltype_signed_off_by', blank=True, 
to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywell', name='base_layout', field=models.ForeignKey(to='assays.AssayBaseLayout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywell', name='created_by', field=models.ForeignKey(related_name='assaywell_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywell', name='modified_by', field=models.ForeignKey(related_name='assaywell_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywell', name='signed_off_by', field=models.ForeignKey(related_name='assaywell_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaywell', name='well_type', field=models.ForeignKey(to='assays.AssayWellType', on_delete=models.CASCADE), preserve_default=True, ), migrations.AlterUniqueTogether( name='assaywell', unique_together=set([('base_layout', 'row', 'column')]), ), migrations.AddField( model_name='assaytimepoint', name='assay_layout', field=models.ForeignKey(to='assays.AssayLayout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='assay_device_readout', field=models.ForeignKey(verbose_name=b'Organ Chip Study', to='assays.AssayRun', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='chip_setup', field=models.ForeignKey(verbose_name=b'Chip Setup', to='assays.AssayChipSetup', unique=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='created_by', field=models.ForeignKey(related_name='assaytestresult_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='group', field=models.ForeignKey(help_text=b'Bind to a group', to='auth.Group', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='modified_by', field=models.ForeignKey(related_name='assaytestresult_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaytestresult', name='signed_off_by', field=models.ForeignKey(related_name='assaytestresult_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayrun', name='created_by', field=models.ForeignKey(related_name='assayrun_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayrun', name='group', field=models.ForeignKey(help_text=b'Bind to a group', to='auth.Group', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayrun', name='modified_by', field=models.ForeignKey(related_name='assayrun_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayrun', name='signed_off_by', field=models.ForeignKey(related_name='assayrun_signed_off_by', blank=True, 
to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayrun', name='study_configuration', field=models.ForeignKey(blank=True, to='assays.StudyConfiguration', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresulttype', name='created_by', field=models.ForeignKey(related_name='assayresulttype_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresulttype', name='modified_by', field=models.ForeignKey(related_name='assayresulttype_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresulttype', name='signed_off_by', field=models.ForeignKey(related_name='assayresulttype_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresultfunction', name='created_by', field=models.ForeignKey(related_name='assayresultfunction_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresultfunction', name='modified_by', field=models.ForeignKey(related_name='assayresultfunction_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresultfunction', name='signed_off_by', field=models.ForeignKey(related_name='assayresultfunction_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresult', name='assay_name', field=models.ForeignKey(verbose_name=b'Assay', to='assays.AssayChipReadoutAssay', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresult', name='assay_result', field=models.ForeignKey(to='assays.AssayTestResult', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresult', name='result_function', field=models.ForeignKey(verbose_name=b'Function', blank=True, to='assays.AssayResultFunction', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresult', name='result_type', field=models.ForeignKey(verbose_name=b'Measure', blank=True, to='assays.AssayResultType', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayresult', name='test_unit', field=models.ForeignKey(blank=True, to='assays.PhysicalUnits', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayreadout', name='assay_device_readout', field=models.ForeignKey(to='assays.AssayDeviceReadout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayreader', name='created_by', field=models.ForeignKey(related_name='assayreader_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayreader', name='modified_by', field=models.ForeignKey(related_name='assayreader_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayreader', name='signed_off_by', 
field=models.ForeignKey(related_name='assayreader_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='assay_device_id', field=models.ForeignKey(verbose_name=b'Plate ID/ Barcode', to='assays.AssayDeviceReadout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='created_by', field=models.ForeignKey(related_name='assayplatetestresult_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='modified_by', field=models.ForeignKey(related_name='assayplatetestresult_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='signed_off_by', field=models.ForeignKey(related_name='assayplatetestresult_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='time_units', field=models.ForeignKey(blank=True, to='assays.TimeUnits', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assayplatetestresult', name='value_units', field=models.ForeignKey(blank=True, to='assays.PhysicalUnits', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodeltype', name='created_by', field=models.ForeignKey(related_name='assaymodeltype_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodeltype', name='modified_by', field=models.ForeignKey(related_name='assaymodeltype_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodeltype', name='signed_off_by', field=models.ForeignKey(related_name='assaymodeltype_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodel', name='assay_type', field=models.ForeignKey(to='assays.AssayModelType', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodel', name='created_by', field=models.ForeignKey(related_name='assaymodel_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodel', name='modified_by', field=models.ForeignKey(related_name='assaymodel_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaymodel', name='signed_off_by', field=models.ForeignKey(related_name='assaymodel_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayoutformat', name='created_by', field=models.ForeignKey(related_name='assaylayoutformat_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayoutformat', name='device', field=models.ForeignKey(to='microdevices.Microdevice', 
on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayoutformat', name='modified_by', field=models.ForeignKey(related_name='assaylayoutformat_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayoutformat', name='signed_off_by', field=models.ForeignKey(related_name='assaylayoutformat_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayout', name='base_layout', field=models.ForeignKey(to='assays.AssayBaseLayout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayout', name='created_by', field=models.ForeignKey(related_name='assaylayout_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayout', name='modified_by', field=models.ForeignKey(related_name='assaylayout_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaylayout', name='signed_off_by', field=models.ForeignKey(related_name='assaylayout_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='assay_layout', field=models.ForeignKey(to='assays.AssayLayout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='assay_name', field=models.ForeignKey(verbose_name=b'Assay', to='assays.AssayModel', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='cell_sample', field=models.ForeignKey(to='cellsamples.CellSample', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='created_by', field=models.ForeignKey(related_name='assaydevicereadout_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='modified_by', field=models.ForeignKey(related_name='assaydevicereadout_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='reader_name', field=models.ForeignKey(verbose_name=b'Reader', to='assays.AssayReader', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='readout_unit', field=models.ForeignKey(to='assays.ReadoutUnit', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='signed_off_by', field=models.ForeignKey(related_name='assaydevicereadout_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaydevicereadout', name='timeunit', field=models.ForeignKey(to='assays.TimeUnits', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaycompound', name='assay_layout', field=models.ForeignKey(to='assays.AssayLayout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaycompound', name='compound', 
field=models.ForeignKey(to='compounds.Compound', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='assay_run_id', field=models.ForeignKey(verbose_name=b'Organ Chip Study', to='assays.AssayRun', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='compound', field=models.ForeignKey(blank=True, to='compounds.Compound', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='created_by', field=models.ForeignKey(related_name='assaychipsetup_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='device', field=models.ForeignKey(verbose_name=b'Organ Model Name', to='microdevices.OrganModel', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='group', field=models.ForeignKey(help_text=b'Bind to a group', to='auth.Group', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='modified_by', field=models.ForeignKey(related_name='assaychipsetup_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='signed_off_by', field=models.ForeignKey(related_name='assaychipsetup_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipsetup', name='unit', field=models.ForeignKey(default=4, blank=True, to='assays.PhysicalUnits', null=True, verbose_name=b'conc. 
Unit', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadoutassay', name='assay_id', field=models.ForeignKey(verbose_name=b'Assay', to='assays.AssayModel', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadoutassay', name='reader_id', field=models.ForeignKey(verbose_name=b'Reader', to='assays.AssayReader', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadoutassay', name='readout_id', field=models.ForeignKey(verbose_name=b'Readout', to='assays.AssayChipReadout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadoutassay', name='readout_unit', field=models.ForeignKey(to='assays.ReadoutUnit', on_delete=models.CASCADE), preserve_default=True, ), migrations.AlterUniqueTogether( name='assaychipreadoutassay', unique_together=set([('readout_id', 'assay_id')]), ), migrations.AddField( model_name='assaychipreadout', name='chip_setup', field=models.ForeignKey(null=True, to='assays.AssayChipSetup', unique=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadout', name='created_by', field=models.ForeignKey(related_name='assaychipreadout_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadout', name='group', field=models.ForeignKey(help_text=b'Bind to a group', to='auth.Group', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadout', name='modified_by', field=models.ForeignKey(related_name='assaychipreadout_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadout', name='signed_off_by', field=models.ForeignKey(related_name='assaychipreadout_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipreadout', name='timeunit', field=models.ForeignKey(default=3, to='assays.TimeUnits', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychiprawdata', name='assay_chip_id', field=models.ForeignKey(to='assays.AssayChipReadout', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychiprawdata', name='assay_id', field=models.ForeignKey(to='assays.AssayChipReadoutAssay', on_delete=models.CASCADE), preserve_default=True, ), migrations.AlterUniqueTogether( name='assaychiprawdata', unique_together=set([('assay_chip_id', 'assay_id', 'field_id', 'elapsed_time')]), ), migrations.AddField( model_name='assaychipcells', name='assay_chip', field=models.ForeignKey(to='assays.AssayChipSetup', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipcells', name='cell_biosensor', field=models.ForeignKey(blank=True, to='cellsamples.Biosensor', null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaychipcells', name='cell_sample', field=models.ForeignKey(to='cellsamples.CellSample', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaybaselayout', name='created_by', field=models.ForeignKey(related_name='assaybaselayout_created_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), 
preserve_default=True, ), migrations.AddField( model_name='assaybaselayout', name='layout_format', field=models.ForeignKey(to='assays.AssayLayoutFormat', on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaybaselayout', name='modified_by', field=models.ForeignKey(related_name='assaybaselayout_modified_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), migrations.AddField( model_name='assaybaselayout', name='signed_off_by', field=models.ForeignKey(related_name='assaybaselayout_signed_off_by', blank=True, to=settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE), preserve_default=True, ), ]
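# The AddField operations above repeatedly attach created_by / modified_by /
# signed_off_by ForeignKeys (each pointing at settings.AUTH_USER_MODEL with a
# "<model>_<field>" related_name) to almost every assay model. A minimal sketch
# of the kind of abstract tracking base model that would generate exactly these
# fields is shown below; the class name and placement are hypothetical
# illustrations, not the project's actual definitions.
from django.conf import settings
from django.db import models


class TrackedModel(models.Model):  # hypothetical abstract base, for illustration only
    """Audit ForeignKeys matching the per-model fields added in the migration above."""

    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='%(class)s_created_by',
        blank=True, null=True, on_delete=models.CASCADE)
    modified_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='%(class)s_modified_by',
        blank=True, null=True, on_delete=models.CASCADE)
    signed_off_by = models.ForeignKey(
        settings.AUTH_USER_MODEL, related_name='%(class)s_signed_off_by',
        blank=True, null=True, on_delete=models.CASCADE)

    class Meta:
        # '%(class)s' in related_name expands per concrete subclass, e.g.
        # 'assaywell_created_by', which is the pattern the migration encodes.
        abstract = True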
import re from itertools import count from xml.sax.saxutils import quoteattr from django.conf import settings from html5lib import HTMLParser from html5lib.serializer.htmlserializer import HTMLSerializer from html5lib.treebuilders import getTreeBuilder from html5lib.treewalkers import getTreeWalker from lxml.etree import Element from statsd import statsd from django.utils.translation import ugettext as _, ugettext_lazy as _lazy from kitsune.gallery.models import Image from kitsune.sumo import parser as sumo_parser from kitsune.sumo.parser import ALLOWED_ATTRIBUTES, get_object_fallback from kitsune.sumo.utils import uselocale from kitsune.wiki.models import Document # block elements wikimarkup knows about (and thus preserves) BLOCK_LEVEL_ELEMENTS = ['table', 'blockquote', 'h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'td', 'th', 'div', 'hr', 'pre', 'p', 'li', 'ul', 'ol', 'center', 'dl', 'dt', 'dd', 'ins', 'del', 'section'] TEMPLATE_ARG_REGEX = re.compile('{{{([^{]+?)}}}') def wiki_to_html(wiki_markup, locale=settings.WIKI_DEFAULT_LANGUAGE, doc_id=None, parser_cls=None): """Wiki Markup -> HTML with the wiki app's enhanced parser""" if parser_cls is None: parser_cls = WikiParser with statsd.timer('wiki.render'): with uselocale(locale): content = parser_cls(doc_id=doc_id).parse( wiki_markup, show_toc=False, locale=locale, toc_string=_('Table of Contents')) return content def _format_template_content(content, params): """Formats a template's content using passed in arguments""" def arg_replace(matchobj): """Takes a regex matching {{{name}} and returns params['name']""" param_name = matchobj.group(1) if param_name in params: return params[param_name] return TEMPLATE_ARG_REGEX.sub(arg_replace, content) def _build_template_params(params_str): """Builds a dictionary from a given list of raw strings passed in by the user. Example syntax it handles: * ['one', 'two'] turns into {1: 'one', 2: 'two'} * ['12=blah'] turns into {12: 'blah'} * ['name=value'] turns into {'name': 'value'} """ i = 0 params = {} for item in params_str: param, __, value = item.partition('=') if value: params[param] = value else: i = i + 1 params[str(i)] = param return params # Custom syntax using regexes follows below. # * turn tags of the form {tag content} into <span class="tag">content</span> # * expand {key ctrl+alt} into <span class="key">ctrl</span> + # <span class="key">alt</span> # * turn {note}note{/note} into <div class="note">a note</div> def _key_split(matchobj): """Expands a {key a+b+c} syntax into <span class="key">a</span> + ... More explicitly, it takes a regex matching {key ctrl+alt+del} and returns: <span class="key">ctrl</span> + <span class="key">alt</span> + <span class="key">del</span> """ keys = [k.strip() for k in matchobj.group(1).split('+')] return ' + '.join(['<span class="key">%s</span>' % key for key in keys]) PATTERNS = [ (re.compile(pattern, re.DOTALL), replacement) for pattern, replacement in ( # (x, y), replace x with y (r'{(?P<name>note|warning)}', '<div class="\g<name>">'), (r'\{/(note|warning)\}', '</div>'), # To use } as a key, this syntax won't work. 
Use [[T:key|}]] instead (r'\{key (.+?)\}', _key_split), # ungreedy: stop at the first } (r'{(?P<name>button|menu|filepath|pref) (?P<content>.*?)}', '<span class="\g<name>">\g<content></span>'), )] def parse_simple_syntax(text): for pattern, replacement in PATTERNS: text = pattern.sub(replacement, text) return text class ForParser(object): """HTML 5 parser which finds <for> tags and translates them into spans and divs having the proper data- elements and classes. As a side effect, repairs poorly matched pairings of <for> (and other tags), probably in favor of the location of the opening tag. """ TREEBUILDER = 'lxml' CONTAINER_TAG = 'div' def __init__(self, html): """Create a parse tree from the given HTML.""" def really_parse_fragment(parser, html): """Parse a possibly multi-rooted HTML fragment, wrapping it in a <div> to make it easy to query later. As far as I can tell, this is what parseFragment is supposed to do (but doesn't). See http://code.google.com/p/html5lib/issues/detail?id=161. """ top_level_elements = parser.parseFragment(html) container = Element(self.CONTAINER_TAG) # Why lxml couldn't just have text nodes, I'll never understand. # Text nodes that come other than first are automatically stuffed # into the tail attrs of the preceding elements by html5lib. if top_level_elements and isinstance(top_level_elements[0], basestring): container.text = top_level_elements.pop(0) container.extend(top_level_elements) return container p = HTMLParser(tree=getTreeBuilder(self.TREEBUILDER)) self._root = really_parse_fragment(p, html) def expand_fors(self): """Turn the for tags into spans and divs, and apply data attrs. If a for contains any block-level elements, it turns into a div. Otherwise, it turns into a span. """ html_ns = 'http://www.w3.org/1999/xhtml' for for_el in self._root.xpath('//html:for', namespaces={'html': html_ns}): for_el.tag = ('div' if any(for_el.find('{' + html_ns + '}' + tag) is not None for tag in BLOCK_LEVEL_ELEMENTS) else 'span') for_el.attrib['class'] = 'for' def to_unicode(self): """Return the unicode serialization of myself.""" container_len = len(self.CONTAINER_TAG) + 2 # 2 for the <> walker = getTreeWalker(self.TREEBUILDER) stream = walker(self._root) serializer = HTMLSerializer(quote_attr_values=True, omit_optional_tags=False) return serializer.render(stream)[container_len:-container_len - 1] @staticmethod def _on_own_line(match, postspace): """Return (whether the tag is on its own line, whether the tag is at the very top of the string, whether the tag is at the very bottom of the string). Tolerates whitespace to the right of the tag: a tag with trailing whitespace on the line can still be considered to be on its own line. 
""" pos_before_tag = match.start(2) - 1 if pos_before_tag >= 0: at_left = match.string[pos_before_tag] == '\n' at_top = False else: at_left = at_top = True at_bottom_modulo_space = match.end(4) == len(match.string) at_right_modulo_space = at_bottom_modulo_space or '\n' in postspace return (at_left and at_right_modulo_space, at_top, at_bottom_modulo_space) @staticmethod def _wiki_to_tag(attrs): """Turn {for ...} into <for data-for="...">.""" if not attrs: return '<for>' # Strip leading and trailing whitespace from each value for easier # matching in the JS: stripped = ','.join([x.strip() for x in attrs.split(',')]) return '<for data-for=' + quoteattr(stripped) + '>' _FOR_OR_CLOSER = re.compile(r'(\s*)' r'(\{for(?: +([^\}]*))?\}|{/for})' r'(\s*)', re.MULTILINE) @classmethod def strip_fors(cls, text): """Replace each {for} or {/for} tag with a unique token the wiki formatter will treat as inline. Return (stripped text, dehydrated fors for use with unstrip_fors). """ # "attributes" of {for a, b} directives, like "a, b" keyed for token # number dehydrations = {} indexes = count() def dehydrate(match): """Close over `dehydrations`, sock the {for}s away therein, and replace {for}s and {/for}s with tokens.""" def paragraph_padding(str): """If str doesn't contain at least 2 newlines, return enough such that appending them will cause it to.""" return '\n' * max(2 - str.count('\n'), 0) def preceding_whitespace(str, pos): """Return all contiguous whitespace preceding str[pos].""" whitespace = [] for i in xrange(pos - 1, 0, -1): if str[i] in '\t \n\r': whitespace.append(str[i]) else: break whitespace.reverse() return ''.join(whitespace) prespace, tag, attrs, postspace = match.groups() if tag != '{/for}': i = indexes.next() dehydrations[i] = cls._wiki_to_tag(attrs) token = u'\x07%i\x07' % i else: token = u'\x07/sf\x07' # If the {for} or {/for} is on a line by itself (righthand # whitespace is allowed; left would indicate a <pre>), make sure it # has enough newlines on each side to make it its own paragraph, # lest it get sucked into being part of the next or previous # paragraph: on_own_line, at_top, at_bottom = cls._on_own_line(match, postspace) if on_own_line: # If tag (excluding leading whitespace) wasn't at top of # document, space it off from preceding block elements: if not at_top: # If there are already enough \ns before the tag to # distance it from the preceding paragraph, take them into # account before adding more. prespace += paragraph_padding( preceding_whitespace(match.string, match.start(1)) + prespace) # If tag (including trailing whitespace) wasn't at the bottom # of the document, space it off from following block elements: if not at_bottom: postspace += paragraph_padding(postspace) return prespace + token + postspace # Do single replaces over and over, taking into account the effects of # previous ones so that whitespace added in a previous replacement can # be considered for its role in helping to nudge an adjacent block- # level {for} into its own paragraph. There's no pos arg to replace(), # so we had to write our own. 
pos = 0 while True: m = cls._FOR_OR_CLOSER.search(text, pos) if m is None: return text, dehydrations done = text[:m.start()] + dehydrate(m) # already been searched pos = len(done) text = done + text[m.end():] # Dratted wiki formatter likes to put <p> tags around my token when it sits # on a line by itself, so tolerate and consume that foolishness: _PARSED_STRIPPED_FOR = re.compile( # Whitespace, a {for} token, then more whitespace (including <br>s): r'<p>' r'(?:\s|<br\s*/?>)*' r'\x07(\d+)\x07' # The {for} token r'(?:\s|<br\s*/?>)*' r'</p>' # Alternately, a lone {for} token that didn't get wrapped in a <p>: r'|\x07(\d+)\x07') _PARSED_STRIPPED_FOR_CLOSER = re.compile( # Similar to above, a {/for} token wrapped in <p> and whitespace: r'<p>' r'(?:\s|<br\s*/?>)*' r'\x07/sf\x07' # {/for} token r'(?:\s|<br\s*/?>)*' r'</p>' # Or a lone {/for} token: r'|\x07/sf\x07') @classmethod def unstrip_fors(cls, html, dehydrations): """Replace the tokens with <for> tags the ForParser understands.""" def hydrate(match): return dehydrations.get(int(match.group(1) or match.group(2)), '') # Put <for ...> tags back in: html = cls._PARSED_STRIPPED_FOR.sub(hydrate, html) # Replace {/for} tags: return cls._PARSED_STRIPPED_FOR_CLOSER.sub(u'</for>', html) # L10n: This error is displayed if a template is included into itself. RECURSION_MESSAGE = _lazy(u'[Recursive inclusion of "%s"]') class WikiParser(sumo_parser.WikiParser): """An extension of the parser from the forums adding more crazy features {for} tags, inclusions, and templates--oh my! """ image_template = 'wikiparser/hook_image_lazy.html' def __init__(self, base_url=None, doc_id=None): """ doc_id -- If you want to be nice, pass the ID of the Document you are rendering. This will make recursive inclusions fail immediately rather than after the first round of recursion. """ super(WikiParser, self).__init__(base_url) # Stack of document IDs to prevent Include or Template recursion: self.inclusions = [doc_id] if doc_id else [] # The wiki has additional hooks not used elsewhere self.registerInternalLinkHook('Include', self._hook_include) self.registerInternalLinkHook('I', self._hook_include) self.registerInternalLinkHook('Template', self._hook_template) self.registerInternalLinkHook('T', self._hook_template) def parse(self, text, **kwargs): """Wrap SUMO's parse() to support additional wiki-only features.""" # Replace fors with inline tokens the wiki formatter will tolerate: text, data = ForParser.strip_fors(text) # Do simple substitutions: text = parse_simple_syntax(text) # Run the formatter: html = super(WikiParser, self).parse( text, youtube_embeds=False, **kwargs) # Put the fors back in (as XML-ish <for> tags this time): html = ForParser.unstrip_fors(html, data) # Balance badly paired <for> tags: for_parser = ForParser(html) # Convert them to spans and divs: for_parser.expand_fors() html = for_parser.to_unicode() html = self.add_youtube_embeds(html) return html def _hook_include(self, parser, space, title): """Returns the document's parsed content.""" message = _('The document "%s" does not exist.') % title include = get_object_fallback(Document, title, locale=self.locale) if not include or not include.current_revision: return message if include.id in parser.inclusions: return RECURSION_MESSAGE % title else: parser.inclusions.append(include.id) ret = parser.parse(include.current_revision.content, show_toc=False, locale=self.locale) parser.inclusions.pop() return ret # Wiki templates are documents that receive arguments. 
# # They can be useful when including similar content in multiple places, # with slight variations. For examples and details see: # http://www.mediawiki.org/wiki/Help:Templates # def _hook_template(self, parser, space, title): """Handles Template:Template name, formatting the content using given args""" params = title.split('|') short_title = params.pop(0) template_title = 'Template:' + short_title message = _('The template "%s" does not exist or has no approved ' 'revision.') % short_title template = get_object_fallback(Document, template_title, locale=self.locale, is_template=True) if not template or not template.current_revision: return message if template.id in parser.inclusions: return RECURSION_MESSAGE % template_title else: parser.inclusions.append(template.id) c = template.current_revision.content.rstrip() # Note: this completely ignores the allowed attributes passed to the # WikiParser.parse() method and defaults to ALLOWED_ATTRIBUTES. parsed = parser.parse(c, show_toc=False, attributes=ALLOWED_ATTRIBUTES, locale=self.locale) parser.inclusions.pop() # Special case for inline templates if '\n' not in c: parsed = parsed.replace('<p>', '') parsed = parsed.replace('</p>', '') # Do some string formatting to replace parameters return _format_template_content(parsed, _build_template_params(params)) class WhatLinksHereParser(WikiParser): """An extension of the wiki that deals with what links here data.""" def __init__(self, doc_id, **kwargs): self.current_doc = Document.objects.get(pk=doc_id) return (super(WhatLinksHereParser, self) .__init__(doc_id=doc_id, **kwargs)) def _hook_internal_link(self, parser, space, name): """Records links between documents, and then calls super().""" title = name.split('|')[0] locale = self.current_doc.locale linked_doc = get_object_fallback(Document, title, locale) if linked_doc is not None: self.current_doc.add_link_to(linked_doc, 'link') return (super(WhatLinksHereParser, self) ._hook_internal_link(parser, space, name)) def _hook_template(self, parser, space, name): """Record a template link between documents, and then call super().""" params = name.split('|') template = get_object_fallback(Document, 'Template:' + params[0], locale=self.locale, is_template=True) if template: self.current_doc.add_link_to(template, 'template') return (super(WhatLinksHereParser, self) ._hook_template(parser, space, name)) def _hook_include(self, parser, space, name): """Record an include link between documents, and then call super().""" include = get_object_fallback(Document, name, locale=self.locale) if include: self.current_doc.add_link_to(include, 'include') return (super(WhatLinksHereParser, self) ._hook_include(parser, space, name)) def _hook_image_tag(self, parser, space, name): """Record an image is included in a document, then call super().""" title = name.split('|')[0] image = get_object_fallback(Image, title, self.locale) if image: self.current_doc.add_image(image) return (super(WhatLinksHereParser, self) ._hook_image_tag(parser, space, name))
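# A minimal usage sketch of the helpers defined above, assuming this module is
# importable in a configured Django/Kitsune environment. The sample strings are
# illustrative only; they simply exercise parse_simple_syntax() and the
# template-parameter helpers documented earlier in this file.
def _demo_simple_syntax_and_templates():
    # {note}...{/note} becomes a classed <div>, and {key a+b} expands per key:
    html = parse_simple_syntax('{note}Press {key Ctrl+S} to save.{/note}')
    # -> '<div class="note">Press <span class="key">Ctrl</span> + '
    #    '<span class="key">S</span> to save.</div>'

    # Positional and named template arguments, as described in the
    # _build_template_params() docstring (positional keys are stored as strings):
    params = _build_template_params(['one', 'name=value'])
    # -> {'1': 'one', 'name': 'value'}
    content = _format_template_content('first={{{1}}}, named={{{name}}}', params)
    # -> 'first=one, named=value'
    return html, params, content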
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpRequest, HttpResponse from azure.mgmt.core.exceptions import ARMErrorFormat from .. import models if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] class JobsOperations(object): """JobsOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.mgmt.scheduler.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config def get( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str **kwargs # type: Any ): # type: (...) -> "models.JobDefinition" """Gets a job. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. 
:type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: JobDefinition, or the result of cls(response) :rtype: ~azure.mgmt.scheduler.models.JobDefinition :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.JobDefinition"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" accept = "application/json, text/json" # Construct URL url = self.get.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') request = self._client.get(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('JobDefinition', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}'} # type: ignore def create_or_update( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str job, # type: "models.JobDefinition" **kwargs # type: Any ): # type: (...) -> "models.JobDefinition" """Provisions a new job or updates an existing job. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. :type job_name: str :param job: The job definition. 
:type job: ~azure.mgmt.scheduler.models.JobDefinition :keyword callable cls: A custom type or function that will be passed the direct response :return: JobDefinition, or the result of cls(response) :rtype: ~azure.mgmt.scheduler.models.JobDefinition :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.JobDefinition"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json, text/json" # Construct URL url = self.create_or_update.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(job, 'JobDefinition') body_content_kwargs['content'] = body_content request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 201]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if response.status_code == 200: deserialized = self._deserialize('JobDefinition', pipeline_response) if response.status_code == 201: deserialized = self._deserialize('JobDefinition', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}'} # type: ignore def patch( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str job, # type: "models.JobDefinition" **kwargs # type: Any ): # type: (...) -> "models.JobDefinition" """Patches an existing job. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. :type job_name: str :param job: The job definition. 
:type job: ~azure.mgmt.scheduler.models.JobDefinition :keyword callable cls: A custom type or function that will be passed the direct response :return: JobDefinition, or the result of cls(response) :rtype: ~azure.mgmt.scheduler.models.JobDefinition :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.JobDefinition"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" content_type = kwargs.pop("content_type", "application/json") accept = "application/json, text/json" # Construct URL url = self.patch.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str') header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') body_content_kwargs = {} # type: Dict[str, Any] body_content = self._serialize.body(job, 'JobDefinition') body_content_kwargs['content'] = body_content request = self._client.patch(url, query_parameters, header_parameters, **body_content_kwargs) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) deserialized = self._deserialize('JobDefinition', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized patch.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}'} # type: ignore def delete( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str **kwargs # type: Any ): # type: (...) -> None """Deletes a job. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. 
:type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" # Construct URL url = self.delete.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.delete(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}'} # type: ignore def run( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str **kwargs # type: Any ): # type: (...) -> None """Runs a job. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. 
:type job_name: str :keyword callable cls: A custom type or function that will be passed the direct response :return: None, or the result of cls(response) :rtype: None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" # Construct URL url = self.run.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') # Construct headers header_parameters = {} # type: Dict[str, Any] request = self._client.post(url, query_parameters, header_parameters) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) if cls: return cls(pipeline_response, None, {}) run.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/run'} # type: ignore def list( self, resource_group_name, # type: str job_collection_name, # type: str top=None, # type: Optional[int] skip=None, # type: Optional[int] filter=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> Iterable["models.JobListResult"] """Lists all jobs under the specified job collection. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param top: The number of jobs to request, in the of range of [1..100]. :type top: int :param skip: The (0-based) index of the job history list from which to begin requesting entries. :type skip: int :param filter: The filter to apply on the job state. 
:type filter: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either JobListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.scheduler.models.JobListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.JobListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" accept = "application/json, text/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if top is not None: query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=100, minimum=1) if skip is not None: query_parameters['$skip'] = self._serialize.query("skip", skip, 'int') if filter is not None: query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('JobListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs'} # type: ignore def list_job_history( self, resource_group_name, # type: str job_collection_name, # type: str job_name, # type: str top=None, # type: Optional[int] skip=None, # type: Optional[int] filter=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> Iterable["models.JobHistoryListResult"] """Lists job history. :param resource_group_name: The resource group name. :type resource_group_name: str :param job_collection_name: The job collection name. :type job_collection_name: str :param job_name: The job name. :type job_name: str :param top: the number of job history to request, in the of range of [1..100]. :type top: int :param skip: The (0-based) index of the job history list from which to begin requesting entries. 
:type skip: int :param filter: The filter to apply on the job state. :type filter: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either JobHistoryListResult or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.mgmt.scheduler.models.JobHistoryListResult] :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType["models.JobHistoryListResult"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = "2016-03-01" accept = "application/json, text/json" def prepare_request(next_link=None): # Construct headers header_parameters = {} # type: Dict[str, Any] header_parameters['Accept'] = self._serialize.header("accept", accept, 'str') if not next_link: # Construct URL url = self.list_job_history.metadata['url'] # type: ignore path_format_arguments = { 'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'), 'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'), 'jobCollectionName': self._serialize.url("job_collection_name", job_collection_name, 'str'), 'jobName': self._serialize.url("job_name", job_name, 'str'), } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} # type: Dict[str, Any] query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') if top is not None: query_parameters['$top'] = self._serialize.query("top", top, 'int', maximum=100, minimum=1) if skip is not None: query_parameters['$skip'] = self._serialize.query("skip", skip, 'int') if filter is not None: query_parameters['$filter'] = self._serialize.query("filter", filter, 'str') request = self._client.get(url, query_parameters, header_parameters) else: url = next_link query_parameters = {} # type: Dict[str, Any] request = self._client.get(url, query_parameters, header_parameters) return request def extract_data(pipeline_response): deserialized = self._deserialize('JobHistoryListResult', pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response, error_format=ARMErrorFormat) return pipeline_response return ItemPaged( get_next, extract_data ) list_job_history.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Scheduler/jobCollections/{jobCollectionName}/jobs/{jobName}/history'} # type: ignore
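# A hedged usage sketch of the JobsOperations group above. It assumes the
# generated package exposes a SchedulerManagementClient that accepts an
# azure.identity credential plus a subscription id and attaches this operation
# group as `client.jobs`; those names follow the usual AutoRest layout and are
# assumptions, not verified against this particular package build.
from azure.identity import DefaultAzureCredential
from azure.mgmt.scheduler import SchedulerManagementClient  # assumed client name


def _demo_list_and_get_jobs(subscription_id, resource_group_name, job_collection_name):
    client = SchedulerManagementClient(
        credential=DefaultAzureCredential(), subscription_id=subscription_id)
    # list() returns an ItemPaged iterator; $top, $skip and $filter are optional.
    for job in client.jobs.list(resource_group_name, job_collection_name, top=10):
        print(job.id)
    # get() returns a single JobDefinition or raises HttpResponseError.
    return client.jobs.get(resource_group_name, job_collection_name, 'sample-job')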
"""Copyright 2014 Cyrus Dasadia Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from django.test import TestCase, TransactionTestCase, Client from django.contrib.auth.models import User from django import forms from appauth.models import Perms from cito_engine.models import Team from . import factories class TestUserViews(TransactionTestCase): def setUp(self): self.client = Client() self.user = User.objects.create_user(username='hodor', password='hodor', first_name='Hodor', last_name='HodorHodor', email='hodor@hodor.hodor') Perms.objects.create(user=self.user, access_level=1).save() def login(self): self.client.login(username='hodor', password='hodor') def test_user_views_without_login(self): """All user views without login""" for view in ['/', '/create/', '/toggle/', '/team/add/', '/view/1/', '/edit/1/', '/perms/update/']: response = self.client.get('/users%s' % view) self.assertRedirects(response, '/login/?next=/users%s' % view, msg_prefix='Error for view:%s' % view) def test_view_all_users(self): """View all users""" self.login() response = self.client.get('/users/') self.assertEquals(response.status_code, 200) self.assertContains(response, 'Hodor HodorHodor') def test_modify_user_team_membership(self): """Testing user team modification view""" t1 = factories.TeamFactory.create(name='Alpha') t2 = factories.TeamFactory.create(name='Beta') self.login() # Fail on GET response = self.client.get('/users/team/add/') self.assertEquals(response.status_code, 400) response = self.client.get('/users/team/remove/') self.assertEquals(response.status_code, 400) # Try adding response = self.client.post('/users/team/add/', data={'user_id': self.user.id, 'team_id': t1.id}, follow=True) self.assertRedirects(response, '/users/view/%d/' % self.user.id) self.assertEquals(self.user.team_set.get(id=t1.id), t1) self.assertNotIn(t2, self.user.team_set.filter(id=t2.id)) # Try removing response = self.client.post('/users/team/remove/', data={'user_id': self.user.id, 'team_id': t1.id}, follow=True) self.assertRedirects(response, '/users/view/%d/' % self.user.id) self.assertNotIn(t1, self.user.team_set.filter(id=t1.id)) def test_user_edit_view(self): """Testing edit user view without""" self.login() data = dict(first_name='Burt', last_name='Reynolds', email='b@b.com', username='burty', ) response = self.client.post('/users/edit/%d/' % self.user.id, data=data, follow=True) self.assertRedirects(response, '/users/view/%d/' % self.user.id) def test_user_edit_view_with_password_change(self): """Testing edit user view with password change""" self.login() data = dict(first_name='Burt', last_name='Reynolds', email='b@b.com', username='burty', password1='pass1', password2='pass1') response = self.client.post('/users/edit/%d/' % self.user.id, data=data, follow=True) self.assertRedirects(response, '/login/?next=/users/view/%s/' % self.user.id) def test_user_toggle(self): """Test user_toggle view i.e setting is_active True/False""" self.login() response = self.client.post('/users/toggle/', data={'user_id': self.user.id}) self.assertRedirects(response, 
'/users/view/%d/' % self.user.id) u = User.objects.get(pk=self.user.id) # Make sure we do not deactivate ourself self.assertEqual(u.is_active, True) # Try on a new piggy # First disable the user new_user = User.objects.create_user(username='archer', password='archer') response = self.client.post('/users/toggle/', data={'user_id': new_user.id}) self.assertRedirects(response, '/users/view/%d/' % new_user.id) u = User.objects.get(pk=new_user.id) self.assertFalse(u.is_active) # Now enable the user response = self.client.post('/users/toggle/', data={'user_id': new_user.id}) self.assertRedirects(response, '/users/view/%d/' % new_user.id) u = User.objects.get(pk=new_user.id) self.assertTrue(u.is_active) class TestCreateUserByView(TransactionTestCase): def setUp(self): self.client = Client() self.user = User.objects.create_user(username='hodor', password='hodor', first_name='Hodor', last_name='HodorHodor', email='hodor@hodor.hodor') Perms.objects.create(user=self.user, access_level=1).save() self.t1 = factories.TeamFactory.create(name='AlphaTeam') self.t2 = factories.TeamFactory.create(name='BetaTeam') self.t3 = factories.TeamFactory.create(name='ZetaTeam') def login(self): self.client.login(username='hodor', password='hodor') def test_validations_of_create_user_view(self): """Test validations for create_user view """ data = dict(fname='', lname='', username='', password1='', password2='', email='', access_level='', teams='' ) self.login() # Make sure all fields are required response = self.client.post('/users/create/', data=data, follow=True) self.assertContains(response, 'This field is required.', count=7) data = dict(fname='Doctor', lname='Zoidberg', username='drzoid', password1='mojo', password2='jojo', email='mo@jo.com', access_level='3', teams='%s' % self.t2.id ) # Testing password matching response = self.client.post('/users/create/', data=data, follow=True) self.assertContains(response, 'The passwords did not match. 
Please try again.', count=2) # TODO Fix this test so it runs successfully even through the main testsuite # def test_create_user_view(self): # """Testing create_user""" # # self.login() # client = Client() # client.login(username='hodor', password='hodor') # data = dict(fname='Doctor', # lname='Zoidberg', # username='drzoid', # password1='mojo', # password2='mojo', # email='mo@jo.com', # access_level='3', # teams='%s' % self.t2.id # ) # print Team.objects.all() # # Test with legit form # response = client.post('/users/create/', data=data, follow=True) # # print response # self.assertRedirects(response, '/users/', msg_prefix="Data is %s" % data) # user = User.objects.get(username='drzoid') # self.assertEquals(user.first_name, 'Doctor') # self.assertEquals(user.last_name, 'Zoidberg') # self.assertEquals(user.email, 'mo@jo.com') # self.assertEquals(user.perms.access_level, 3) # self.assertNotIn(self.t2, self.user.team_set.filter(id=self.t2.id)) class TestViewSingleUser(TransactionTestCase): def setUp(self): self.client = Client() def test_view_single_user(self): """View single user""" u1 = User.objects.create_user(username='pjfry', password='pjfry', first_name='Phillip', last_name='J Fry', email='phil.fry@planetexpress.com') client = Client() client.login(username='pjfry', password='pjfry') response = client.get('/users/view/%d/' % u1.id) self.assertEquals(response.status_code, 200, msg='Got response: \n %s' % response) self.assertIsNotNone(Perms.objects.get(pk=u1.id)) self.assertContains(response, 'Phillip') self.assertContains(response, 'J Fry') self.assertContains(response, 'phil.fry@planetexpress.com') # Lets check for an invalid user_id response = client.get('/users/view/9999/') self.assertEquals(response.status_code, 404) class TestUpdateUserPerms(TransactionTestCase): def setUp(self): self.client = Client() self.user = User.objects.create_user(username='hodor', password='hodor', first_name='Hodor', last_name='HodorHodor', email='hodor@hodor.hodor') Perms.objects.create(user=self.user, access_level=1).save() def test_update_user_perms(self): """Updating user permissions""" self.client.login(username='hodor', password='hodor') new_user = User.objects.create_user(username='phil', password='pjfry', first_name='Phillip', last_name='J Fry', email='phil.fry@planetexpress.com') #Test if perms are created for new user response = self.client.post('/users/perms/update/', data={'user_id': new_user.id, 'access_level': '5'}, follow=True) self.assertRedirects(response, '/users/view/%d/' % new_user.id) user = User.objects.get(pk=new_user.id) self.assertEquals(user.perms.access_level, 5) # Test updating once you have perms response = self.client.post('/users/perms/update/', data={'user_id': new_user.id, 'access_level': '2'}, follow=True) self.assertRedirects(response, '/users/view/%d/' % new_user.id) user = User.objects.get(pk=new_user.id) self.assertEquals(user.perms.access_level, 2)
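# The tests above rely on a local factories module providing TeamFactory. A
# minimal factory_boy-based sketch that would satisfy those calls is shown
# here; it is an assumption about that module's contents, since the real
# factories.py is not part of this excerpt.
import factory

from cito_engine.models import Team


class TeamFactory(factory.django.DjangoModelFactory):
    """Create Team rows for tests; the name defaults to a unique sequence."""

    class Meta:
        model = Team

    name = factory.Sequence(lambda n: 'Team%d' % n)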
import os from typing import Callable from typing import cast from typing import List from typing import Optional from typing import TYPE_CHECKING from typing import Union from . import autogenerate as autogen from . import util from .runtime.environment import EnvironmentContext from .script import ScriptDirectory if TYPE_CHECKING: from alembic.config import Config from alembic.script.base import Script def list_templates(config): """List available templates. :param config: a :class:`.Config` object. """ config.print_stdout("Available templates:\n") for tempname in os.listdir(config.get_template_directory()): with open( os.path.join(config.get_template_directory(), tempname, "README") ) as readme: synopsis = next(readme) config.print_stdout("%s - %s", tempname, synopsis) config.print_stdout("\nTemplates are used via the 'init' command, e.g.:") config.print_stdout("\n alembic init --template generic ./scripts") def init( config: "Config", directory: str, template: str = "generic", package: bool = False, ) -> None: """Initialize a new scripts directory. :param config: a :class:`.Config` object. :param directory: string path of the target directory :param template: string name of the migration environment template to use. :param package: when True, write ``__init__.py`` files into the environment location as well as the versions/ location. .. versionadded:: 1.2 """ if os.access(directory, os.F_OK) and os.listdir(directory): raise util.CommandError( "Directory %s already exists and is not empty" % directory ) template_dir = os.path.join(config.get_template_directory(), template) if not os.access(template_dir, os.F_OK): raise util.CommandError("No such template %r" % template) if not os.access(directory, os.F_OK): util.status( "Creating directory %s" % os.path.abspath(directory), os.makedirs, directory, ) versions = os.path.join(directory, "versions") util.status( "Creating directory %s" % os.path.abspath(versions), os.makedirs, versions, ) script = ScriptDirectory(directory) for file_ in os.listdir(template_dir): file_path = os.path.join(template_dir, file_) if file_ == "alembic.ini.mako": config_file = os.path.abspath(cast(str, config.config_file_name)) if os.access(cast(str, config_file), os.F_OK): util.msg("File %s already exists, skipping" % config_file) else: script._generate_template( file_path, config_file, script_location=directory ) elif os.path.isfile(file_path): output_file = os.path.join(directory, file_) script._copy_file(file_path, output_file) if package: for path in [ os.path.join(os.path.abspath(directory), "__init__.py"), os.path.join(os.path.abspath(versions), "__init__.py"), ]: file_ = util.status("Adding %s" % path, open, path, "w") file_.close() # type:ignore[attr-defined] util.msg( "Please edit configuration/connection/logging " "settings in %r before proceeding." % config_file ) def revision( config: "Config", message: Optional[str] = None, autogenerate: bool = False, sql: bool = False, head: str = "head", splice: bool = False, branch_label: Optional[str] = None, version_path: Optional[str] = None, rev_id: Optional[str] = None, depends_on: Optional[str] = None, process_revision_directives: Callable = None, ) -> Union[Optional["Script"], List[Optional["Script"]]]: """Create a new revision file. :param config: a :class:`.Config` object. :param message: string message to apply to the revision; this is the ``-m`` option to ``alembic revision``. 
:param autogenerate: whether or not to autogenerate the script from the database; this is the ``--autogenerate`` option to ``alembic revision``. :param sql: whether to dump the script out as a SQL string; when specified, the script is dumped to stdout. This is the ``--sql`` option to ``alembic revision``. :param head: head revision to build the new revision upon as a parent; this is the ``--head`` option to ``alembic revision``. :param splice: whether or not the new revision should be made into a new head of its own; is required when the given ``head`` is not itself a head. This is the ``--splice`` option to ``alembic revision``. :param branch_label: string label to apply to the branch; this is the ``--branch-label`` option to ``alembic revision``. :param version_path: string symbol identifying a specific version path from the configuration; this is the ``--version-path`` option to ``alembic revision``. :param rev_id: optional revision identifier to use instead of having one generated; this is the ``--rev-id`` option to ``alembic revision``. :param depends_on: optional list of "depends on" identifiers; this is the ``--depends-on`` option to ``alembic revision``. :param process_revision_directives: this is a callable that takes the same form as the callable described at :paramref:`.EnvironmentContext.configure.process_revision_directives`; will be applied to the structure generated by the revision process where it can be altered programmatically. Note that unlike all the other parameters, this option is only available via programmatic use of :func:`.command.revision` """ script_directory = ScriptDirectory.from_config(config) command_args = dict( message=message, autogenerate=autogenerate, sql=sql, head=head, splice=splice, branch_label=branch_label, version_path=version_path, rev_id=rev_id, depends_on=depends_on, ) revision_context = autogen.RevisionContext( config, script_directory, command_args, process_revision_directives=process_revision_directives, ) environment = util.asbool(config.get_main_option("revision_environment")) if autogenerate: environment = True if sql: raise util.CommandError( "Using --sql with --autogenerate does not make any sense" ) def retrieve_migrations(rev, context): revision_context.run_autogenerate(rev, context) return [] elif environment: def retrieve_migrations(rev, context): revision_context.run_no_autogenerate(rev, context) return [] elif sql: raise util.CommandError( "Using --sql with the revision command when " "revision_environment is not configured does not make any sense" ) if environment: with EnvironmentContext( config, script_directory, fn=retrieve_migrations, as_sql=sql, template_args=revision_context.template_args, revision_context=revision_context, ): script_directory.run_env() # the revision_context now has MigrationScript structure(s) present. # these could theoretically be further processed / rewritten *here*, # in addition to the hooks present within each run_migrations() call, # or at the end of env.py run_migrations_online(). scripts = [script for script in revision_context.generate_scripts()] if len(scripts) == 1: return scripts[0] else: return scripts def merge( config: "Config", revisions: str, message: str = None, branch_label: str = None, rev_id: str = None, ) -> Optional["Script"]: """Merge two revisions together. Creates a new migration file. 
:param config: a :class:`.Config` instance :param message: string message to apply to the revision :param branch_label: string label name to apply to the new revision :param rev_id: hardcoded revision identifier instead of generating a new one. .. seealso:: :ref:`branches` """ script = ScriptDirectory.from_config(config) template_args = { "config": config # Let templates use config for # e.g. multiple databases } return script.generate_revision( rev_id or util.rev_id(), message, refresh=True, head=revisions, branch_labels=branch_label, **template_args # type:ignore[arg-type] ) def upgrade( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, ) -> None: """Upgrade to a later version. :param config: a :class:`.Config` instance. :param revision: string revision target or range for --sql mode :param sql: if True, use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` method. """ script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(":", 2) def upgrade(rev, context): return script._upgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=upgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag, ): script.run_env() def downgrade( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, ) -> None: """Revert to a previous version. :param config: a :class:`.Config` instance. :param revision: string revision target or range for --sql mode :param sql: if True, use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :meth:`.EnvironmentContext.get_tag_argument` method. """ script = ScriptDirectory.from_config(config) starting_rev = None if ":" in revision: if not sql: raise util.CommandError("Range revision not allowed") starting_rev, revision = revision.split(":", 2) elif sql: raise util.CommandError( "downgrade with --sql requires <fromrev>:<torev>" ) def downgrade(rev, context): return script._downgrade_revs(revision, rev) with EnvironmentContext( config, script, fn=downgrade, as_sql=sql, starting_rev=starting_rev, destination_rev=revision, tag=tag, ): script.run_env() def show(config, rev): """Show the revision(s) denoted by the given symbol. :param config: a :class:`.Config` instance. :param rev: string revision target """ script = ScriptDirectory.from_config(config) if rev == "current": def show_current(rev, context): for sc in script.get_revisions(rev): config.print_stdout(sc.log_entry) return [] with EnvironmentContext(config, script, fn=show_current): script.run_env() else: for sc in script.get_revisions(rev): config.print_stdout(sc.log_entry) def history( config: "Config", rev_range: Optional[str] = None, verbose: bool = False, indicate_current: bool = False, ) -> None: """List changeset scripts in chronological order. :param config: a :class:`.Config` instance. :param rev_range: string revision range :param verbose: output in verbose mode. :param indicate_current: indicate current revision. 
""" base: Optional[str] head: Optional[str] script = ScriptDirectory.from_config(config) if rev_range is not None: if ":" not in rev_range: raise util.CommandError( "History range requires [start]:[end], " "[start]:, or :[end]" ) base, head = rev_range.strip().split(":") else: base = head = None environment = ( util.asbool(config.get_main_option("revision_environment")) or indicate_current ) def _display_history(config, script, base, head, currents=()): for sc in script.walk_revisions( base=base or "base", head=head or "heads" ): if indicate_current: sc._db_current_indicator = sc.revision in currents config.print_stdout( sc.cmd_format( verbose=verbose, include_branches=True, include_doc=True, include_parents=True, ) ) def _display_history_w_current(config, script, base, head): def _display_current_history(rev, context): if head == "current": _display_history(config, script, base, rev, rev) elif base == "current": _display_history(config, script, rev, head, rev) else: _display_history(config, script, base, head, rev) return [] with EnvironmentContext(config, script, fn=_display_current_history): script.run_env() if base == "current" or head == "current" or environment: _display_history_w_current(config, script, base, head) else: _display_history(config, script, base, head) def heads(config, verbose=False, resolve_dependencies=False): """Show current available heads in the script directory. :param config: a :class:`.Config` instance. :param verbose: output in verbose mode. :param resolve_dependencies: treat dependency version as down revisions. """ script = ScriptDirectory.from_config(config) if resolve_dependencies: heads = script.get_revisions("heads") else: heads = script.get_revisions(script.get_heads()) for rev in heads: config.print_stdout( rev.cmd_format( verbose, include_branches=True, tree_indicators=False ) ) def branches(config, verbose=False): """Show current branch points. :param config: a :class:`.Config` instance. :param verbose: output in verbose mode. """ script = ScriptDirectory.from_config(config) for sc in script.walk_revisions(): if sc.is_branch_point: config.print_stdout( "%s\n%s\n", sc.cmd_format(verbose, include_branches=True), "\n".join( "%s -> %s" % ( " " * len(str(sc.revision)), rev_obj.cmd_format( False, include_branches=True, include_doc=verbose ), ) for rev_obj in ( script.get_revision(rev) for rev in sc.nextrev ) ), ) def current(config: "Config", verbose: bool = False) -> None: """Display the current revision for a database. :param config: a :class:`.Config` instance. :param verbose: output in verbose mode. """ script = ScriptDirectory.from_config(config) def display_version(rev, context): if verbose: config.print_stdout( "Current revision(s) for %s:", util.obfuscate_url_pw(context.connection.engine.url), ) for rev in script.get_all_current(rev): config.print_stdout(rev.cmd_format(verbose)) return [] with EnvironmentContext( config, script, fn=display_version, dont_mutate=True ): script.run_env() def stamp( config: "Config", revision: str, sql: bool = False, tag: Optional[str] = None, purge: bool = False, ) -> None: """'stamp' the revision table with the given revision; don't run any migrations. :param config: a :class:`.Config` instance. :param revision: target revision or list of revisions. May be a list to indicate stamping of multiple branch heads. .. note:: this parameter is called "revisions" in the command line interface. .. versionchanged:: 1.2 The revision may be a single revision or list of revisions when stamping multiple branch heads. 
:param sql: use ``--sql`` mode :param tag: an arbitrary "tag" that can be intercepted by custom ``env.py`` scripts via the :class:`.EnvironmentContext.get_tag_argument` method. :param purge: delete all entries in the version table before stamping. .. versionadded:: 1.2 """ script = ScriptDirectory.from_config(config) if sql: destination_revs = [] starting_rev = None for _revision in util.to_list(revision): if ":" in _revision: srev, _revision = _revision.split(":", 2) if starting_rev != srev: if starting_rev is None: starting_rev = srev else: raise util.CommandError( "Stamp operation with --sql only supports a " "single starting revision at a time" ) destination_revs.append(_revision) else: destination_revs = util.to_list(revision) def do_stamp(rev, context): return script._stamp_revs(util.to_tuple(destination_revs), rev) with EnvironmentContext( config, script, fn=do_stamp, as_sql=sql, starting_rev=starting_rev if sql else None, destination_rev=util.to_tuple(destination_revs), tag=tag, purge=purge, ): script.run_env() def edit(config: "Config", rev: str) -> None: """Edit revision script(s) using $EDITOR. :param config: a :class:`.Config` instance. :param rev: target revision. """ script = ScriptDirectory.from_config(config) if rev == "current": def edit_current(rev, context): if not rev: raise util.CommandError("No current revisions") for sc in script.get_revisions(rev): util.open_in_editor(sc.path) return [] with EnvironmentContext(config, script, fn=edit_current): script.run_env() else: revs = script.get_revisions(rev) if not revs: raise util.CommandError( "No revision files indicated by symbol '%s'" % rev ) for sc in revs: util.open_in_editor(sc.path)
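# ---------------------------------------------------------------------------
# Usage sketch (not part of command.py): the functions above back the
# ``alembic`` CLI, but they can also be driven programmatically.  The ini
# path and the revision message below are placeholders.
# ---------------------------------------------------------------------------
from alembic import command
from alembic.config import Config

cfg = Config("alembic.ini")  # path to the project's alembic.ini

# equivalent of ``alembic revision --autogenerate -m "add users table"``
command.revision(cfg, message="add users table", autogenerate=True)

# equivalent of ``alembic upgrade head`` and ``alembic history --verbose``
command.upgrade(cfg, "head")
command.history(cfg, verbose=True)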
# Copyright 2013 Red Hat, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Client side of the compute RPC API. """ from oslo.config import cfg from oslo import messaging from nova import exception from nova.i18n import _, _LW from nova import objects from nova.objects import base as objects_base from nova.openstack.common import jsonutils from nova.openstack.common import log as logging from nova import rpc from nova import utils rpcapi_opts = [ cfg.StrOpt('compute_topic', default='compute', help='The topic compute nodes listen on'), ] CONF = cfg.CONF CONF.register_opts(rpcapi_opts) rpcapi_cap_opt = cfg.StrOpt('compute', help='Set a version cap for messages sent to compute services. If you ' 'plan to do a live upgrade from havana to icehouse, you should ' 'set this option to "icehouse-compat" before beginning the live ' 'upgrade procedure.') CONF.register_opt(rpcapi_cap_opt, 'upgrade_levels') LOG = logging.getLogger(__name__) def _compute_host(host, instance): '''Get the destination host for a message. :param host: explicit host to send the message to. :param instance: If an explicit host was not specified, use instance['host'] :returns: A host ''' if host: return host if not instance: raise exception.NovaException(_('No compute host specified')) if not instance['host']: raise exception.NovaException(_('Unable to find host for ' 'Instance %s') % instance['uuid']) return instance['host'] class ComputeAPI(object): '''Client side of the compute rpc API. API version history: * 1.0 - Initial version. 
* 1.1 - Adds get_host_uptime() * 1.2 - Adds check_can_live_migrate_[destination|source] * 1.3 - Adds change_instance_metadata() * 1.4 - Remove instance_uuid, add instance argument to reboot_instance() * 1.5 - Remove instance_uuid, add instance argument to pause_instance(), unpause_instance() * 1.6 - Remove instance_uuid, add instance argument to suspend_instance() * 1.7 - Remove instance_uuid, add instance argument to get_console_output() * 1.8 - Remove instance_uuid, add instance argument to add_fixed_ip_to_instance() * 1.9 - Remove instance_uuid, add instance argument to attach_volume() * 1.10 - Remove instance_id, add instance argument to check_can_live_migrate_destination() * 1.11 - Remove instance_id, add instance argument to check_can_live_migrate_source() * 1.12 - Remove instance_uuid, add instance argument to confirm_resize() * 1.13 - Remove instance_uuid, add instance argument to detach_volume() * 1.14 - Remove instance_uuid, add instance argument to finish_resize() * 1.15 - Remove instance_uuid, add instance argument to finish_revert_resize() * 1.16 - Remove instance_uuid, add instance argument to get_diagnostics() * 1.17 - Remove instance_uuid, add instance argument to get_vnc_console() * 1.18 - Remove instance_uuid, add instance argument to inject_file() * 1.19 - Remove instance_uuid, add instance argument to inject_network_info() * 1.20 - Remove instance_id, add instance argument to post_live_migration_at_destination() * 1.21 - Remove instance_uuid, add instance argument to power_off_instance() and stop_instance() * 1.22 - Remove instance_uuid, add instance argument to power_on_instance() and start_instance() * 1.23 - Remove instance_id, add instance argument to pre_live_migration() * 1.24 - Remove instance_uuid, add instance argument to rebuild_instance() * 1.25 - Remove instance_uuid, add instance argument to remove_fixed_ip_from_instance() * 1.26 - Remove instance_id, add instance argument to remove_volume_connection() * 1.27 - Remove instance_uuid, add instance argument to rescue_instance() * 1.28 - Remove instance_uuid, add instance argument to reset_network() * 1.29 - Remove instance_uuid, add instance argument to resize_instance() * 1.30 - Remove instance_uuid, add instance argument to resume_instance() * 1.31 - Remove instance_uuid, add instance argument to revert_resize() * 1.32 - Remove instance_id, add instance argument to rollback_live_migration_at_destination() * 1.33 - Remove instance_uuid, add instance argument to set_admin_password() * 1.34 - Remove instance_uuid, add instance argument to snapshot_instance() * 1.35 - Remove instance_uuid, add instance argument to unrescue_instance() * 1.36 - Remove instance_uuid, add instance argument to change_instance_metadata() * 1.37 - Remove instance_uuid, add instance argument to terminate_instance() * 1.38 - Changes to prep_resize(): * remove instance_uuid, add instance * remove instance_type_id, add instance_type * remove topic, it was unused * 1.39 - Remove instance_uuid, add instance argument to run_instance() * 1.40 - Remove instance_id, add instance argument to live_migration() * 1.41 - Adds refresh_instance_security_rules() * 1.42 - Add reservations arg to prep_resize(), resize_instance(), finish_resize(), confirm_resize(), revert_resize() and finish_revert_resize() * 1.43 - Add migrate_data to live_migration() * 1.44 - Adds reserve_block_device_name() * 2.0 - Remove 1.x backwards compat * 2.1 - Adds orig_sys_metadata to rebuild_instance() * 2.2 - Adds slave_info parameter to add_aggregate_host() and 
remove_aggregate_host() * 2.3 - Adds volume_id to reserve_block_device_name() * 2.4 - Add bdms to terminate_instance * 2.5 - Add block device and network info to reboot_instance * 2.6 - Remove migration_id, add migration to resize_instance * 2.7 - Remove migration_id, add migration to confirm_resize * 2.8 - Remove migration_id, add migration to finish_resize * 2.9 - Add publish_service_capabilities() * 2.10 - Adds filter_properties and request_spec to prep_resize() * 2.11 - Adds soft_delete_instance() and restore_instance() * 2.12 - Remove migration_id, add migration to revert_resize * 2.13 - Remove migration_id, add migration to finish_revert_resize * 2.14 - Remove aggregate_id, add aggregate to add_aggregate_host * 2.15 - Remove aggregate_id, add aggregate to remove_aggregate_host * 2.16 - Add instance_type to resize_instance * 2.17 - Add get_backdoor_port() * 2.18 - Add bdms to rebuild_instance * 2.19 - Add node to run_instance * 2.20 - Add node to prep_resize * 2.21 - Add migrate_data dict param to pre_live_migration() * 2.22 - Add recreate, on_shared_storage and host arguments to rebuild_instance() * 2.23 - Remove network_info from reboot_instance * 2.24 - Added get_spice_console method * 2.25 - Add attach_interface() and detach_interface() * 2.26 - Add validate_console_port to ensure the service connects to vnc on the correct port * 2.27 - Adds 'reservations' to terminate_instance() and soft_delete_instance() ... Grizzly supports message version 2.27. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.27. * 2.28 - Adds check_instance_shared_storage() * 2.29 - Made start_instance() and stop_instance() take new-world instance objects * 2.30 - Adds live_snapshot_instance() * 2.31 - Adds shelve_instance(), shelve_offload_instance, and unshelve_instance() * 2.32 - Make reboot_instance take a new world instance object * 2.33 - Made suspend_instance() and resume_instance() take new-world instance objects * 2.34 - Added swap_volume() * 2.35 - Made terminate_instance() and soft_delete_instance() take new-world instance objects * 2.36 - Made pause_instance() and unpause_instance() take new-world instance objects * 2.37 - Added the legacy_bdm_in_spec parameter to run_instance * 2.38 - Made check_can_live_migrate_[destination|source] take new-world instance objects * 2.39 - Made revert_resize() and confirm_resize() take new-world instance objects * 2.40 - Made reset_network() take new-world instance object * 2.41 - Make inject_network_info take new-world instance object * 2.42 - Splits snapshot_instance() into snapshot_instance() and backup_instance() and makes them take new-world instance objects. * 2.43 - Made prep_resize() take new-world instance object * 2.44 - Add volume_snapshot_create(), volume_snapshot_delete() * 2.45 - Made resize_instance() take new-world objects * 2.46 - Made finish_resize() take new-world objects * 2.47 - Made finish_revert_resize() take new-world objects ... Havana supports message version 2.47. So, any changes to existing methods in 2.x after that point should be done such that they can handle the version_cap being set to 2.47. * 2.48 - Make add_aggregate_host() and remove_aggregate_host() take new-world objects * ... 
- Remove live_snapshot() that was never actually used * 3.0 - Remove 2.x compatibility * 3.1 - Update get_spice_console() to take an instance object * 3.2 - Update get_vnc_console() to take an instance object * 3.3 - Update validate_console_port() to take an instance object * 3.4 - Update rebuild_instance() to take an instance object * 3.5 - Pass preserve_ephemeral flag to rebuild_instance() * 3.6 - Make volume_snapshot_{create,delete} use new-world objects * 3.7 - Update change_instance_metadata() to take an instance object * 3.8 - Update set_admin_password() to take an instance object * 3.9 - Update rescue_instance() to take an instance object * 3.10 - Added get_rdp_console method * 3.11 - Update unrescue_instance() to take an object * 3.12 - Update add_fixed_ip_to_instance() to take an object * 3.13 - Update remove_fixed_ip_from_instance() to take an object * 3.14 - Update post_live_migration_at_destination() to take an object * 3.15 - Adds filter_properties and node to unshelve_instance() * 3.16 - Make reserve_block_device_name and attach_volume use new-world objects, and add disk_bus and device_type params to reserve_block_device_name, and bdm param to attach_volume * 3.17 - Update attach_interface and detach_interface to take an object * 3.18 - Update get_diagnostics() to take an instance object * Removed inject_file(), as it was unused. * 3.19 - Update pre_live_migration to take instance object * 3.20 - Make restore_instance take an instance object * 3.21 - Made rebuild take new-world BDM objects * 3.22 - Made terminate_instance take new-world BDM objects * 3.23 - Added external_instance_event() * build_and_run_instance was added in Havana and not used or documented. ... Icehouse supports message version 3.23. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.23. * 3.24 - Update rescue_instance() to take optional rescue_image_ref * 3.25 - Make detach_volume take an object * 3.26 - Make live_migration() and rollback_live_migration_at_destination() take an object * ... Removed run_instance() * 3.27 - Make run_instance() accept a new-world object * 3.28 - Update get_console_output() to accept a new-world object * 3.29 - Make check_instance_shared_storage accept a new-world object * 3.30 - Make remove_volume_connection() accept a new-world object * 3.31 - Add get_instance_diagnostics * 3.32 - Add destroy_disks and migrate_data optional parameters to rollback_live_migration_at_destination() * 3.33 - Make build_and_run_instance() take a NetworkRequestList object * 3.34 - Add get_serial_console method * 3.35 - Make reserve_block_device_name return a BDM object ... Juno supports message version 3.35. So, any changes to existing methods in 3.x after that point should be done such that they can handle the version_cap being set to 3.35. ''' VERSION_ALIASES = { 'icehouse': '3.23', 'juno': '3.35', } def __init__(self): super(ComputeAPI, self).__init__() target = messaging.Target(topic=CONF.compute_topic, version='3.0') version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.compute, CONF.upgrade_levels.compute) serializer = objects_base.NovaObjectSerializer() self.client = self.get_client(target, version_cap, serializer) # Cells overrides this def get_client(self, target, version_cap, serializer): return rpc.get_client(target, version_cap=version_cap, serializer=serializer) def add_aggregate_host(self, ctxt, aggregate, host_param, host, slave_info=None): '''Add aggregate host. 
:param ctxt: request context :param aggregate_id: :param host_param: This value is placed in the message to be the 'host' parameter for the remote method. :param host: This is the host to send the message to. ''' version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'add_aggregate_host', aggregate=aggregate, host=host_param, slave_info=slave_info) def add_fixed_ip_to_instance(self, ctxt, instance, network_id): version = '3.12' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'add_fixed_ip_to_instance', instance=instance, network_id=network_id) def attach_interface(self, ctxt, instance, network_id, port_id, requested_ip): version = '3.17' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'attach_interface', instance=instance, network_id=network_id, port_id=port_id, requested_ip=requested_ip) def attach_volume(self, ctxt, instance, volume_id, mountpoint, bdm=None): # NOTE(ndipanov): Remove volume_id and mountpoint on the next major # version bump - they are not needed when using bdm objects. version = '3.16' kw = {'instance': instance, 'volume_id': volume_id, 'mountpoint': mountpoint, 'bdm': bdm} cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'attach_volume', **kw) def change_instance_metadata(self, ctxt, instance, diff): version = '3.7' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'change_instance_metadata', instance=instance, diff=diff) def _warn_buggy_live_migrations(self, data=None): # NOTE(danms): We know that libvirt live migration with shared block # storage was buggy (potential loss of data) before version 3.32. # Since we need to support live migration with older clients, we need # to warn the operator of this possibility. The logic below tries to # decide if a warning should be emitted, assuming the positive if # not sure. This can be removed when we bump to RPC API version 4.0. if data: if data.get('is_shared_block_storage') is not False: # Shared block storage, or unknown should_warn = True else: # Specifically not shared block storage should_warn = False else: # Unknown, so warn to be safe should_warn = True if should_warn: LOG.warning(_LW('Live migration with clients before RPC version ' '3.32 is known to be buggy with shared block ' 'storage. 
See ' 'https://bugs.launchpad.net/nova/+bug/1250751 for ' 'more information!')) def check_can_live_migrate_destination(self, ctxt, instance, destination, block_migration, disk_over_commit): if self.client.can_send_version('3.32'): version = '3.32' else: version = '3.0' self._warn_buggy_live_migrations() cctxt = self.client.prepare(server=destination, version=version) return cctxt.call(ctxt, 'check_can_live_migrate_destination', instance=instance, block_migration=block_migration, disk_over_commit=disk_over_commit) def check_can_live_migrate_source(self, ctxt, instance, dest_check_data): if self.client.can_send_version('3.32'): version = '3.32' else: version = '3.0' self._warn_buggy_live_migrations() source = _compute_host(None, instance) cctxt = self.client.prepare(server=source, version=version) return cctxt.call(ctxt, 'check_can_live_migrate_source', instance=instance, dest_check_data=dest_check_data) def check_instance_shared_storage(self, ctxt, instance, data, host=None): if self.client.can_send_version('3.29'): version = '3.29' else: version = '3.0' instance = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) return cctxt.call(ctxt, 'check_instance_shared_storage', instance=instance, data=data) def confirm_resize(self, ctxt, instance, migration, host, reservations=None, cast=True): version = '3.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) rpc_method = cctxt.cast if cast else cctxt.call return rpc_method(ctxt, 'confirm_resize', instance=instance, migration=migration, reservations=reservations) def detach_interface(self, ctxt, instance, port_id): version = '3.17' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_interface', instance=instance, port_id=port_id) def detach_volume(self, ctxt, instance, volume_id): if self.client.can_send_version('3.25'): version = '3.25' else: version = '3.0' instance = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'detach_volume', instance=instance, volume_id=volume_id) def finish_resize(self, ctxt, instance, migration, image, disk_info, host, reservations=None): version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'finish_resize', instance=instance, migration=migration, image=image, disk_info=disk_info, reservations=reservations) def finish_revert_resize(self, ctxt, instance, migration, host, reservations=None): version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'finish_revert_resize', instance=instance, migration=migration, reservations=reservations) def get_console_output(self, ctxt, instance, tail_length): if self.client.can_send_version('3.28'): version = '3.28' else: version = '3.0' instance = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_console_output', instance=instance, tail_length=tail_length) def get_console_pool_info(self, ctxt, console_type, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'get_console_pool_info', console_type=console_type) def get_console_topic(self, ctxt, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'get_console_topic') def get_diagnostics(self, ctxt, instance): version = '3.18' cctxt = 
self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_diagnostics', instance=instance) def get_instance_diagnostics(self, ctxt, instance): instance_p = jsonutils.to_primitive(instance) kwargs = {'instance': instance_p} version = '3.31' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_instance_diagnostics', **kwargs) def get_vnc_console(self, ctxt, instance, console_type): version = '3.2' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_vnc_console', instance=instance, console_type=console_type) def get_spice_console(self, ctxt, instance, console_type): version = '3.1' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_spice_console', instance=instance, console_type=console_type) def get_rdp_console(self, ctxt, instance, console_type): version = '3.10' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_rdp_console', instance=instance, console_type=console_type) def get_serial_console(self, ctxt, instance, console_type): version = '3.34' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'get_serial_console', instance=instance, console_type=console_type) def validate_console_port(self, ctxt, instance, port, console_type): version = '3.3' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'validate_console_port', instance=instance, port=port, console_type=console_type) def host_maintenance_mode(self, ctxt, host_param, mode, host): '''Set host maintenance mode :param ctxt: request context :param host_param: This value is placed in the message to be the 'host' parameter for the remote method. :param mode: :param host: This is the host to send the message to. 
''' version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'host_maintenance_mode', host=host_param, mode=mode) def host_power_action(self, ctxt, action, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'host_power_action', action=action) def inject_network_info(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'inject_network_info', instance=instance) def live_migration(self, ctxt, instance, dest, block_migration, host, migrate_data=None): if self.client.can_send_version('3.26'): version = '3.26' else: version = '3.0' instance = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'live_migration', instance=instance, dest=dest, block_migration=block_migration, migrate_data=migrate_data) def pause_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'pause_instance', instance=instance) def post_live_migration_at_destination(self, ctxt, instance, block_migration, host): version = '3.14' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'post_live_migration_at_destination', instance=instance, block_migration=block_migration) def pre_live_migration(self, ctxt, instance, block_migration, disk, host, migrate_data=None): version = '3.19' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'pre_live_migration', instance=instance, block_migration=block_migration, disk=disk, migrate_data=migrate_data) def prep_resize(self, ctxt, image, instance, instance_type, host, reservations=None, request_spec=None, filter_properties=None, node=None): version = '3.0' instance_type_p = jsonutils.to_primitive(instance_type) image_p = jsonutils.to_primitive(image) cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'prep_resize', instance=instance, instance_type=instance_type_p, image=image_p, reservations=reservations, request_spec=request_spec, filter_properties=filter_properties, node=node) def reboot_instance(self, ctxt, instance, block_device_info, reboot_type): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'reboot_instance', instance=instance, block_device_info=block_device_info, reboot_type=reboot_type) def rebuild_instance(self, ctxt, instance, new_pass, injected_files, image_ref, orig_image_ref, orig_sys_metadata, bdms, recreate=False, on_shared_storage=False, host=None, preserve_ephemeral=False, kwargs=None): # NOTE(danms): kwargs is only here for cells compatibility, don't # actually send it to compute extra = {'preserve_ephemeral': preserve_ephemeral} version = '3.21' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'rebuild_instance', instance=instance, new_pass=new_pass, injected_files=injected_files, image_ref=image_ref, orig_image_ref=orig_image_ref, orig_sys_metadata=orig_sys_metadata, bdms=bdms, recreate=recreate, on_shared_storage=on_shared_storage, **extra) def refresh_provider_fw_rules(self, ctxt, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_provider_fw_rules') def remove_aggregate_host(self, ctxt, aggregate, host_param, host, slave_info=None): '''Remove aggregate host. 
:param ctxt: request context :param aggregate_id: :param host_param: This value is placed in the message to be the 'host' parameter for the remote method. :param host: This is the host to send the message to. ''' version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'remove_aggregate_host', aggregate=aggregate, host=host_param, slave_info=slave_info) def remove_fixed_ip_from_instance(self, ctxt, instance, address): version = '3.13' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'remove_fixed_ip_from_instance', instance=instance, address=address) def remove_volume_connection(self, ctxt, instance, volume_id, host): if self.client.can_send_version('3.30'): version = '3.30' else: version = '3.0' instance = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'remove_volume_connection', instance=instance, volume_id=volume_id) def rescue_instance(self, ctxt, instance, rescue_password, rescue_image_ref=None): msg_args = {'rescue_password': rescue_password} if self.client.can_send_version('3.24'): version = '3.24' msg_args['rescue_image_ref'] = rescue_image_ref else: version = '3.9' msg_args['instance'] = instance cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'rescue_instance', **msg_args) def reset_network(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'reset_network', instance=instance) def resize_instance(self, ctxt, instance, migration, image, instance_type, reservations=None): version = '3.0' instance_type_p = jsonutils.to_primitive(instance_type) cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resize_instance', instance=instance, migration=migration, image=image, reservations=reservations, instance_type=instance_type_p) def resume_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'resume_instance', instance=instance) def revert_resize(self, ctxt, instance, migration, host, reservations=None): version = '3.0' cctxt = self.client.prepare(server=_compute_host(host, instance), version=version) cctxt.cast(ctxt, 'revert_resize', instance=instance, migration=migration, reservations=reservations) def rollback_live_migration_at_destination(self, ctxt, instance, host, destroy_disks=True, migrate_data=None): if self.client.can_send_version('3.32'): version = '3.32' extra = {'destroy_disks': destroy_disks, 'migrate_data': migrate_data, } else: version = '3.0' extra = {} self._warn_buggy_live_migrations(migrate_data) cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'rollback_live_migration_at_destination', instance=instance, **extra) # NOTE(alaski): Remove this method when the scheduler rpc interface is # bumped to 4.x as the only callers of this method will be removed. 
def run_instance(self, ctxt, instance, host, request_spec, filter_properties, requested_networks, injected_files, admin_password, is_first_time, node=None, legacy_bdm_in_spec=True): if self.client.can_send_version('3.27'): version = '3.27' else: version = '3.0' instance = jsonutils.to_primitive(instance) msg_kwargs = {'instance': instance, 'request_spec': request_spec, 'filter_properties': filter_properties, 'requested_networks': requested_networks, 'injected_files': injected_files, 'admin_password': admin_password, 'is_first_time': is_first_time, 'node': node, 'legacy_bdm_in_spec': legacy_bdm_in_spec} cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'run_instance', **msg_kwargs) def set_admin_password(self, ctxt, instance, new_pass): version = '3.8' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) return cctxt.call(ctxt, 'set_admin_password', instance=instance, new_pass=new_pass) def set_host_enabled(self, ctxt, enabled, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'set_host_enabled', enabled=enabled) def swap_volume(self, ctxt, instance, old_volume_id, new_volume_id): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'swap_volume', instance=instance, old_volume_id=old_volume_id, new_volume_id=new_volume_id) def get_host_uptime(self, ctxt, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) return cctxt.call(ctxt, 'get_host_uptime') def reserve_block_device_name(self, ctxt, instance, device, volume_id, disk_bus=None, device_type=None): kw = {'instance': instance, 'device': device, 'volume_id': volume_id, 'disk_bus': disk_bus, 'device_type': device_type, 'return_bdm_object': True} if self.client.can_send_version('3.35'): version = '3.35' else: del kw['return_bdm_object'] version = '3.16' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) volume_bdm = cctxt.call(ctxt, 'reserve_block_device_name', **kw) if not isinstance(volume_bdm, objects.BlockDeviceMapping): volume_bdm = objects.BlockDeviceMapping.get_by_volume_id( ctxt, volume_id) return volume_bdm def backup_instance(self, ctxt, instance, image_id, backup_type, rotation): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'backup_instance', instance=instance, image_id=image_id, backup_type=backup_type, rotation=rotation) def snapshot_instance(self, ctxt, instance, image_id): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'snapshot_instance', instance=instance, image_id=image_id) def start_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'start_instance', instance=instance) def stop_instance(self, ctxt, instance, do_cast=True): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) rpc_method = cctxt.cast if do_cast else cctxt.call return rpc_method(ctxt, 'stop_instance', instance=instance) def suspend_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'suspend_instance', instance=instance) def terminate_instance(self, ctxt, instance, bdms, reservations=None): version = '3.22' cctxt = 
self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'terminate_instance', instance=instance, bdms=bdms, reservations=reservations) def unpause_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unpause_instance', instance=instance) def unrescue_instance(self, ctxt, instance): version = '3.11' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'unrescue_instance', instance=instance) def soft_delete_instance(self, ctxt, instance, reservations=None): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'soft_delete_instance', instance=instance, reservations=reservations) def restore_instance(self, ctxt, instance): version = '3.20' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'restore_instance', instance=instance) def shelve_instance(self, ctxt, instance, image_id=None): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_instance', instance=instance, image_id=image_id) def shelve_offload_instance(self, ctxt, instance): version = '3.0' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'shelve_offload_instance', instance=instance) def unshelve_instance(self, ctxt, instance, host, image=None, filter_properties=None, node=None): version = '3.15' msg_kwargs = { 'instance': instance, 'image': image, 'filter_properties': filter_properties, 'node': node, } cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'unshelve_instance', **msg_kwargs) def volume_snapshot_create(self, ctxt, instance, volume_id, create_info): version = '3.6' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_create', instance=instance, volume_id=volume_id, create_info=create_info) def volume_snapshot_delete(self, ctxt, instance, volume_id, snapshot_id, delete_info): version = '3.6' cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'volume_snapshot_delete', instance=instance, volume_id=volume_id, snapshot_id=snapshot_id, delete_info=delete_info) def external_instance_event(self, ctxt, instances, events): cctxt = self.client.prepare( server=_compute_host(None, instances[0]), version='3.23') cctxt.cast(ctxt, 'external_instance_event', instances=instances, events=events) def build_and_run_instance(self, ctxt, instance, host, image, request_spec, filter_properties, admin_password=None, injected_files=None, requested_networks=None, security_groups=None, block_device_mapping=None, node=None, limits=None): version = '3.33' if not self.client.can_send_version(version): version = '3.23' if requested_networks is not None: if utils.is_neutron(): requested_networks = [(network_id, address, port_id) for (network_id, address, port_id, _) in requested_networks.as_tuples()] else: requested_networks = [(network_id, address) for (network_id, address) in requested_networks.as_tuples()] cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'build_and_run_instance', instance=instance, image=image, request_spec=request_spec, filter_properties=filter_properties, admin_password=admin_password, injected_files=injected_files, requested_networks=requested_networks, 
security_groups=security_groups, block_device_mapping=block_device_mapping, node=node, limits=limits) class SecurityGroupAPI(object): '''Client side of the security group rpc API. API version history: 1.0 - Initial version. 1.41 - Adds refresh_instance_security_rules() 2.0 - Remove 1.x backwards compat 3.0 - Identical to 2.x, but has to be bumped at the same time as the compute API since it's all together on the server side. ''' def __init__(self): super(SecurityGroupAPI, self).__init__() target = messaging.Target(topic=CONF.compute_topic, version='3.0') version_cap = ComputeAPI.VERSION_ALIASES.get( CONF.upgrade_levels.compute, CONF.upgrade_levels.compute) self.client = rpc.get_client(target, version_cap) def refresh_security_group_rules(self, ctxt, security_group_id, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_security_group_rules', security_group_id=security_group_id) def refresh_security_group_members(self, ctxt, security_group_id, host): version = '3.0' cctxt = self.client.prepare(server=host, version=version) cctxt.cast(ctxt, 'refresh_security_group_members', security_group_id=security_group_id) def refresh_instance_security_rules(self, ctxt, host, instance): version = '3.0' instance_p = jsonutils.to_primitive(instance) cctxt = self.client.prepare(server=_compute_host(None, instance), version=version) cctxt.cast(ctxt, 'refresh_instance_security_rules', instance=instance_p)
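# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): the version
# negotiation pattern that the ComputeAPI methods above repeat.  If the
# negotiated version cap allows the newer message version, the new argument
# is included; otherwise the call falls back to the oldest supported
# signature.  The method name ``frob_instance``, the version '3.36' and the
# argument ``new_flag`` are invented purely to show the shape of the pattern.
# ---------------------------------------------------------------------------
#     def frob_instance(self, ctxt, instance, new_flag=None):
#         if self.client.can_send_version('3.36'):
#             version = '3.36'
#             kw = {'instance': instance, 'new_flag': new_flag}
#         else:
#             # older computes do not understand new_flag, so drop it
#             version = '3.0'
#             kw = {'instance': instance}
#         cctxt = self.client.prepare(server=_compute_host(None, instance),
#                                     version=version)
#         return cctxt.call(ctxt, 'frob_instance', **kw)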
""" CIE Chromaticity Diagrams Plotting ================================== Defines the *CIE* chromaticity diagrams plotting objects: - :func:`colour.plotting.plot_chromaticity_diagram_CIE1931` - :func:`colour.plotting.plot_chromaticity_diagram_CIE1960UCS` - :func:`colour.plotting.plot_chromaticity_diagram_CIE1976UCS` - :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1931` - :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1960UCS` - :func:`colour.plotting.plot_sds_in_chromaticity_diagram_CIE1976UCS` """ from __future__ import annotations import bisect import matplotlib.pyplot as plt import numpy as np from matplotlib.collections import LineCollection from matplotlib.patches import Polygon from colour.algebra import normalise_maximum, normalise_vector from colour.colorimetry import ( MultiSpectralDistributions, SDS_ILLUMINANTS, SpectralDistribution, sd_to_XYZ, sds_and_msds_to_sds, ) from colour.hints import ( Any, ArrayLike, Boolean, Callable, Dict, Floating, Integer, List, Literal, NDArray, Optional, Sequence, Tuple, Union, cast, ) from colour.models import ( Luv_to_uv, Luv_uv_to_xy, UCS_to_uv, UCS_uv_to_xy, XYZ_to_Luv, XYZ_to_UCS, XYZ_to_xy, xy_to_XYZ, ) from colour.notation import HEX_to_RGB from colour.plotting import ( CONSTANTS_COLOUR_STYLE, CONSTANTS_ARROW_STYLE, XYZ_to_plotting_colourspace, artist, filter_cmfs, filter_illuminants, override_style, render, update_settings_collection, ) from colour.utilities import ( as_float_array, domain_range_scale, first_item, is_string, optional, tsplit, tstack, suppress_warnings, validate_method, ) __author__ = "Colour Developers" __copyright__ = "Copyright 2013 Colour Developers" __license__ = "New BSD License - https://opensource.org/licenses/BSD-3-Clause" __maintainer__ = "Colour Developers" __email__ = "colour-developers@colour-science.org" __status__ = "Production" __all__ = [ "plot_spectral_locus", "plot_chromaticity_diagram_colours", "plot_chromaticity_diagram", "plot_chromaticity_diagram_CIE1931", "plot_chromaticity_diagram_CIE1960UCS", "plot_chromaticity_diagram_CIE1976UCS", "plot_sds_in_chromaticity_diagram", "plot_sds_in_chromaticity_diagram_CIE1931", "plot_sds_in_chromaticity_diagram_CIE1960UCS", "plot_sds_in_chromaticity_diagram_CIE1976UCS", ] @override_style() def plot_spectral_locus( cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", spectral_locus_colours: Optional[Union[ArrayLike, str]] = None, spectral_locus_opacity: Floating = 1, spectral_locus_labels: Optional[Sequence] = None, method: Union[ Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str ] = "CIE 1931", **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot the *Spectral Locus* according to given method. Parameters ---------- cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. spectral_locus_colours Colours of the *Spectral Locus*, if ``spectral_locus_colours`` is set to *RGB*, the colours will be computed according to the corresponding chromaticity coordinates. spectral_locus_opacity Opacity of the *Spectral Locus*. spectral_locus_labels Array of wavelength labels used to customise which labels will be drawn around the spectral locus. Passing an empty array will result in no wavelength labels being drawn. method *Chromaticity Diagram* method. 
Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> plot_spectral_locus(spectral_locus_colours='RGB') # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_Spectral_Locus.png :align: center :alt: plot_spectral_locus """ method = validate_method( method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"] ) spectral_locus_colours = optional( spectral_locus_colours, CONSTANTS_COLOUR_STYLE.colour.dark ) settings: Dict[str, Any] = {"uniform": True} settings.update(kwargs) _figure, axes = artist(**settings) cmfs = cast( MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values()) ) illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint wavelengths = list(cmfs.wavelengths) equal_energy = np.array([1 / 3] * 2) if method == "cie 1931": ij = XYZ_to_xy(cmfs.values, illuminant) labels = cast( Tuple, optional( spectral_locus_labels, ( 390, 460, 470, 480, 490, 500, 510, 520, 540, 560, 580, 600, 620, 700, ), ), ) elif method == "cie 1960 ucs": ij = UCS_to_uv(XYZ_to_UCS(cmfs.values)) labels = cast( Tuple, optional( spectral_locus_labels, ( 420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680, ), ), ) elif method == "cie 1976 ucs": ij = Luv_to_uv(XYZ_to_Luv(cmfs.values, illuminant), illuminant) labels = cast( Tuple, optional( spectral_locus_labels, ( 420, 440, 450, 460, 470, 480, 490, 500, 510, 520, 530, 540, 550, 560, 570, 580, 590, 600, 610, 620, 630, 645, 680, ), ), ) pl_ij = np.reshape( tstack( [ np.linspace(ij[0][0], ij[-1][0], 20), np.linspace(ij[0][1], ij[-1][1], 20), ] ), (-1, 1, 2), ) sl_ij = np.copy(ij).reshape(-1, 1, 2) purple_line_colours: Optional[Union[ArrayLike, str]] if str(spectral_locus_colours).upper() == "RGB": spectral_locus_colours = normalise_maximum( XYZ_to_plotting_colourspace(cmfs.values), axis=-1 ) if method == "cie 1931": XYZ = xy_to_XYZ(pl_ij) elif method == "cie 1960 ucs": XYZ = xy_to_XYZ(UCS_uv_to_xy(pl_ij)) elif method == "cie 1976 ucs": XYZ = xy_to_XYZ(Luv_uv_to_xy(pl_ij)) purple_line_colours = normalise_maximum( XYZ_to_plotting_colourspace(np.reshape(XYZ, (-1, 3))), axis=-1 ) else: purple_line_colours = spectral_locus_colours for slp_ij, slp_colours in ( (pl_ij, purple_line_colours), (sl_ij, spectral_locus_colours), ): line_collection = LineCollection( np.concatenate([slp_ij[:-1], slp_ij[1:]], axis=1), colors=slp_colours, alpha=spectral_locus_opacity, zorder=CONSTANTS_COLOUR_STYLE.zorder.midground_scatter, ) axes.add_collection(line_collection) wl_ij = dict(zip(wavelengths, ij)) for label in labels: ij_l = wl_ij.get(label) if ij_l is None: continue ij_l = as_float_array([ij_l]) i, j = tsplit(ij_l) index = bisect.bisect(wavelengths, label) left = wavelengths[index - 1] if index >= 0 else wavelengths[index] right = ( wavelengths[index] if index < len(wavelengths) else wavelengths[-1] ) dx = wl_ij[right][0] - wl_ij[left][0] dy = wl_ij[right][1] - wl_ij[left][1] direction = np.array([-dy, dx]) normal = ( np.array([-dy, dx]) if np.dot( normalise_vector(ij_l - equal_energy), normalise_vector(direction), ) > 0 else np.array([dy, -dx]) ) normal = normalise_vector(normal) / 30 label_colour = ( spectral_locus_colours if is_string(spectral_locus_colours) else spectral_locus_colours[index] # type: ignore[index] ) axes.plot( (i, i + normal[0] * 0.75), (j, j + normal[1] * 
            0.75),
            color=label_colour,
            alpha=spectral_locus_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
        )

        axes.plot(
            i,
            j,
            "o",
            color=label_colour,
            alpha=spectral_locus_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_line,
        )

        axes.text(
            i + normal[0],
            j + normal[1],
            label,
            clip_on=True,
            ha="left" if normal[0] >= 0 else "right",
            va="center",
            fontdict={"size": "small"},
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_label,
        )

    settings = {"axes": axes}
    settings.update(kwargs)

    return render(**settings)


@override_style()
def plot_chromaticity_diagram_colours(
    samples: Integer = 256,
    diagram_colours: Optional[Union[ArrayLike, str]] = None,
    diagram_opacity: Floating = 1,
    diagram_clipping_path: Optional[ArrayLike] = None,
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Chromaticity Diagram* colours according to given method.

    Parameters
    ----------
    samples
        Samples count on one axis when computing the *Chromaticity Diagram*
        colours.
    diagram_colours
        Colours of the *Chromaticity Diagram*, if ``diagram_colours`` is set
        to *RGB*, the colours will be computed according to the corresponding
        coordinates.
    diagram_opacity
        Opacity of the *Chromaticity Diagram*.
    diagram_clipping_path
        Path of points used to clip the *Chromaticity Diagram* colours.
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    method
        *Chromaticity Diagram* method.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`, :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram_colours(diagram_colours='RGB')
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_Colours.png
        :align: center
        :alt: plot_chromaticity_diagram_colours
    """

    method = validate_method(
        method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"]
    )

    settings: Dict[str, Any] = {"uniform": True}
    settings.update(kwargs)

    _figure, axes = artist(**settings)

    diagram_colours = cast(
        ArrayLike,
        optional(
            diagram_colours, HEX_to_RGB(CONSTANTS_COLOUR_STYLE.colour.average)
        ),
    )

    cmfs = cast(
        MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values())
    )

    illuminant = CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint

    if method == "cie 1931":
        spectral_locus = XYZ_to_xy(cmfs.values, illuminant)
    elif method == "cie 1960 ucs":
        spectral_locus = UCS_to_uv(XYZ_to_UCS(cmfs.values))
    elif method == "cie 1976 ucs":
        spectral_locus = Luv_to_uv(
            XYZ_to_Luv(cmfs.values, illuminant), illuminant
        )

    use_RGB_diagram_colours = str(diagram_colours).upper() == "RGB"
    if use_RGB_diagram_colours:
        ii, jj = np.meshgrid(
            np.linspace(0, 1, samples), np.linspace(1, 0, samples)
        )
        ij = tstack([ii, jj])

        # NOTE: Various values in the grid have potential to generate
        # zero-divisions, they could be avoided by perturbing the grid, e.g.
        # adding a small epsilon. It was decided instead to disable warnings.
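        # For illustration only, the perturbation alternative mentioned in
        # the NOTE above could be sketched as follows (hypothetical epsilon,
        # not used by this definition, which silences the warnings instead):
        #
        #     _EPSILON = 1e-10
        #     ij = np.where(ij == 0, _EPSILON, ij)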
        with suppress_warnings(python_warnings=True):
            if method == "cie 1931":
                XYZ = xy_to_XYZ(ij)
            elif method == "cie 1960 ucs":
                XYZ = xy_to_XYZ(UCS_uv_to_xy(ij))
            elif method == "cie 1976 ucs":
                XYZ = xy_to_XYZ(Luv_uv_to_xy(ij))

            diagram_colours = normalise_maximum(
                XYZ_to_plotting_colourspace(XYZ, illuminant), axis=-1
            )

    polygon = Polygon(
        spectral_locus
        if diagram_clipping_path is None
        else diagram_clipping_path,
        facecolor="none"
        if use_RGB_diagram_colours
        else np.hstack([diagram_colours, diagram_opacity]),
        edgecolor="none"
        if use_RGB_diagram_colours
        else np.hstack([diagram_colours, diagram_opacity]),
        zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
    )
    axes.add_patch(polygon)

    if use_RGB_diagram_colours:
        # Preventing bounding box related issues as per
        # https://github.com/matplotlib/matplotlib/issues/10529
        image = axes.imshow(
            diagram_colours,
            interpolation="bilinear",
            extent=(0, 1, 0, 1),
            clip_path=None,
            alpha=diagram_opacity,
            zorder=CONSTANTS_COLOUR_STYLE.zorder.background_polygon,
        )
        image.set_clip_path(polygon)

    settings = {"axes": axes}
    settings.update(kwargs)

    return render(**settings)


@override_style()
def plot_chromaticity_diagram(
    cmfs: Union[
        MultiSpectralDistributions,
        str,
        Sequence[Union[MultiSpectralDistributions, str]],
    ] = "CIE 1931 2 Degree Standard Observer",
    show_diagram_colours: Boolean = True,
    show_spectral_locus: Boolean = True,
    method: Union[
        Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str
    ] = "CIE 1931",
    **kwargs: Any,
) -> Tuple[plt.Figure, plt.Axes]:
    """
    Plot the *Chromaticity Diagram* according to given method.

    Parameters
    ----------
    cmfs
        Standard observer colour matching functions used for computing the
        spectral locus boundaries. ``cmfs`` can be of any type or form
        supported by the :func:`colour.plotting.filter_cmfs` definition.
    show_diagram_colours
        Whether to display the *Chromaticity Diagram* background colours.
    show_spectral_locus
        Whether to display the *Spectral Locus*.
    method
        *Chromaticity Diagram* method.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_spectral_locus`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram_colours`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> plot_chromaticity_diagram()  # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    ..
image:: ../_static/Plotting_Plot_Chromaticity_Diagram.png :align: center :alt: plot_chromaticity_diagram """ method = validate_method( method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"] ) settings: Dict[str, Any] = {"uniform": True} settings.update(kwargs) _figure, axes = artist(**settings) cmfs = cast( MultiSpectralDistributions, first_item(filter_cmfs(cmfs).values()) ) if show_diagram_colours: settings = {"axes": axes, "method": method, "diagram_colours": "RGB"} settings.update(kwargs) settings["standalone"] = False settings["cmfs"] = cmfs plot_chromaticity_diagram_colours(**settings) if show_spectral_locus: settings = {"axes": axes, "method": method} settings.update(kwargs) settings["standalone"] = False settings["cmfs"] = cmfs plot_spectral_locus(**settings) if method == "cie 1931": x_label, y_label = "CIE x", "CIE y" elif method == "cie 1960 ucs": x_label, y_label = "CIE u", "CIE v" elif method == "cie 1976 ucs": x_label, y_label = ( "CIE u'", "CIE v'", ) title = f"{method.upper()} Chromaticity Diagram - {cmfs.strict_name}" settings.update( { "axes": axes, "standalone": True, "bounding_box": (0, 1, 0, 1), "title": title, "x_label": x_label, "y_label": y_label, } ) settings.update(kwargs) return render(**settings) @override_style() def plot_chromaticity_diagram_CIE1931( cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", show_diagram_colours: Boolean = True, show_spectral_locus: Boolean = True, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot the *CIE 1931 Chromaticity Diagram*. Parameters ---------- cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. show_diagram_colours Whether to display the *Chromaticity Diagram* background colours. show_spectral_locus Whether to display the *Spectral Locus*. Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> plot_chromaticity_diagram_CIE1931() # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1931.png :align: center :alt: plot_chromaticity_diagram_CIE1931 """ settings = dict(kwargs) settings.update({"method": "CIE 1931"}) return plot_chromaticity_diagram( cmfs, show_diagram_colours, show_spectral_locus, **settings ) @override_style() def plot_chromaticity_diagram_CIE1960UCS( cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", show_diagram_colours: Boolean = True, show_spectral_locus: Boolean = True, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot the *CIE 1960 UCS Chromaticity Diagram*. Parameters ---------- cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. show_diagram_colours Whether to display the *Chromaticity Diagram* background colours. show_spectral_locus Whether to display the *Spectral Locus*. 
Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> plot_chromaticity_diagram_CIE1960UCS() # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1960UCS.png :align: center :alt: plot_chromaticity_diagram_CIE1960UCS """ settings = dict(kwargs) settings.update({"method": "CIE 1960 UCS"}) return plot_chromaticity_diagram( cmfs, show_diagram_colours, show_spectral_locus, **settings ) @override_style() def plot_chromaticity_diagram_CIE1976UCS( cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", show_diagram_colours: Boolean = True, show_spectral_locus: Boolean = True, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot the *CIE 1976 UCS Chromaticity Diagram*. Parameters ---------- cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. show_diagram_colours Whether to display the *Chromaticity Diagram* background colours. show_spectral_locus Whether to display the *Spectral Locus*. Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> plot_chromaticity_diagram_CIE1976UCS() # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_Chromaticity_Diagram_CIE1976UCS.png :align: center :alt: plot_chromaticity_diagram_CIE1976UCS """ settings = dict(kwargs) settings.update({"method": "CIE 1976 UCS"}) return plot_chromaticity_diagram( cmfs, show_diagram_colours, show_spectral_locus, **settings ) @override_style() def plot_sds_in_chromaticity_diagram( sds: Union[ Sequence[Union[SpectralDistribution, MultiSpectralDistributions]], MultiSpectralDistributions, ], cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", chromaticity_diagram_callable: Callable = plot_chromaticity_diagram, method: Union[ Literal["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"], str ] = "CIE 1931", annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None, plot_kwargs: Optional[Union[Dict, List[Dict]]] = None, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot given spectral distribution chromaticity coordinates into the *Chromaticity Diagram* using given method. Parameters ---------- sds Spectral distributions or multi-spectral distributions to plot. `sds` can be a single :class:`colour.MultiSpectralDistributions` class instance, a list of :class:`colour.MultiSpectralDistributions` class instances or a list of :class:`colour.SpectralDistribution` class instances. cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. chromaticity_diagram_callable Callable responsible for drawing the *Chromaticity Diagram*. method *Chromaticity Diagram* method. 
annotate_kwargs Keyword arguments for the :func:`matplotlib.pyplot.annotate` definition, used to annotate the resulting chromaticity coordinates with their respective spectral distribution names. ``annotate_kwargs`` can be either a single dictionary applied to all the arrows with same settings or a sequence of dictionaries with different settings for each spectral distribution. The following special keyword arguments can also be used: - ``annotate`` : Whether to annotate the spectral distributions. plot_kwargs Keyword arguments for the :func:`matplotlib.pyplot.plot` definition, used to control the style of the plotted spectral distributions. `plot_kwargs`` can be either a single dictionary applied to all the plotted spectral distributions with the same settings or a sequence of dictionaries with different settings for each plotted spectral distributions. The following special keyword arguments can also be used: - ``illuminant`` : The illuminant used to compute the spectral distributions colours. The default is the illuminant associated with the whitepoint of the default plotting colourspace. ``illuminant`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``cmfs`` : The standard observer colour matching functions used for computing the spectral distributions colours. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``normalise_sd_colours`` : Whether to normalise the computed spectral distributions colours. The default is *True*. - ``use_sd_colours`` : Whether to use the computed spectral distributions colours under the plotting colourspace illuminant. Alternatively, it is possible to use the :func:`matplotlib.pyplot.plot` definition ``color`` argument with pre-computed values. The default is *True*. Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> A = SDS_ILLUMINANTS['A'] >>> D65 = SDS_ILLUMINANTS['D65'] >>> annotate_kwargs = [ ... {'xytext': (-25, 15), 'arrowprops':{'arrowstyle':'-'}}, ... {} ... ] >>> plot_kwargs = [ ... { ... 'illuminant': SDS_ILLUMINANTS['E'], ... 'markersize' : 15, ... 'normalise_sd_colours': True, ... 'use_sd_colours': True ... }, ... {'illuminant': SDS_ILLUMINANTS['E']}, ... ] >>> plot_sds_in_chromaticity_diagram( ... [A, D65], annotate_kwargs=annotate_kwargs, plot_kwargs=plot_kwargs) ... # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_Plot_SDS_In_Chromaticity_Diagram.png :align: center :alt: plot_sds_in_chromaticity_diagram """ method = validate_method( method, ["CIE 1931", "CIE 1960 UCS", "CIE 1976 UCS"] ) sds_converted = sds_and_msds_to_sds(sds) settings: Dict[str, Any] = {"uniform": True} settings.update(kwargs) _figure, axes = artist(**settings) settings.update( { "axes": axes, "standalone": False, "method": method, "cmfs": cmfs, } ) chromaticity_diagram_callable(**settings) if method == "cie 1931": def XYZ_to_ij(XYZ: NDArray) -> NDArray: """ Convert given *CIE XYZ* tristimulus values to *ij* chromaticity coordinates. """ return XYZ_to_xy(XYZ) bounding_box = (-0.1, 0.9, -0.1, 0.9) elif method == "cie 1960 ucs": def XYZ_to_ij(XYZ: NDArray) -> NDArray: """ Convert given *CIE XYZ* tristimulus values to *ij* chromaticity coordinates. 
""" return UCS_to_uv(XYZ_to_UCS(XYZ)) bounding_box = (-0.1, 0.7, -0.2, 0.6) elif method == "cie 1976 ucs": def XYZ_to_ij(XYZ: NDArray) -> NDArray: """ Convert given *CIE XYZ* tristimulus values to *ij* chromaticity coordinates. """ return Luv_to_uv(XYZ_to_Luv(XYZ)) bounding_box = (-0.1, 0.7, -0.1, 0.7) annotate_settings_collection = [ { "annotate": True, "xytext": (-50, 30), "textcoords": "offset points", "arrowprops": CONSTANTS_ARROW_STYLE, "zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_annotation, } for _ in range(len(sds_converted)) ] if annotate_kwargs is not None: update_settings_collection( annotate_settings_collection, annotate_kwargs, len(sds_converted) ) plot_settings_collection = [ { "color": CONSTANTS_COLOUR_STYLE.colour.brightest, "label": f"{sd.strict_name}", "marker": "o", "markeredgecolor": CONSTANTS_COLOUR_STYLE.colour.dark, "markeredgewidth": CONSTANTS_COLOUR_STYLE.geometry.short * 0.75, "markersize": ( CONSTANTS_COLOUR_STYLE.geometry.short * 6 + CONSTANTS_COLOUR_STYLE.geometry.short * 0.75 ), "zorder": CONSTANTS_COLOUR_STYLE.zorder.midground_line, "cmfs": cmfs, "illuminant": SDS_ILLUMINANTS[ CONSTANTS_COLOUR_STYLE.colour.colourspace.whitepoint_name ], "use_sd_colours": False, "normalise_sd_colours": False, } for sd in sds_converted ] if plot_kwargs is not None: update_settings_collection( plot_settings_collection, plot_kwargs, len(sds_converted) ) for i, sd in enumerate(sds_converted): plot_settings = plot_settings_collection[i] cmfs = cast( MultiSpectralDistributions, first_item(filter_cmfs(plot_settings.pop("cmfs")).values()), ) illuminant = cast( SpectralDistribution, first_item( filter_illuminants(plot_settings.pop("illuminant")).values() ), ) normalise_sd_colours = plot_settings.pop("normalise_sd_colours") use_sd_colours = plot_settings.pop("use_sd_colours") with domain_range_scale("1"): XYZ = sd_to_XYZ(sd, cmfs, illuminant) if use_sd_colours: if normalise_sd_colours: XYZ /= XYZ[..., 1] plot_settings["color"] = np.clip( XYZ_to_plotting_colourspace(XYZ), 0, 1 ) ij = XYZ_to_ij(XYZ) axes.plot(ij[0], ij[1], **plot_settings) if sd.name is not None and annotate_settings_collection[i]["annotate"]: annotate_settings = annotate_settings_collection[i] annotate_settings.pop("annotate") axes.annotate(sd.name, xy=ij, **annotate_settings) settings.update({"standalone": True, "bounding_box": bounding_box}) settings.update(kwargs) return render(**settings) @override_style() def plot_sds_in_chromaticity_diagram_CIE1931( sds: Union[ Sequence[Union[SpectralDistribution, MultiSpectralDistributions]], MultiSpectralDistributions, ], cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", chromaticity_diagram_callable_CIE1931: Callable = ( plot_chromaticity_diagram_CIE1931 ), annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None, plot_kwargs: Optional[Union[Dict, List[Dict]]] = None, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot given spectral distribution chromaticity coordinates into the *CIE 1931 Chromaticity Diagram*. Parameters ---------- sds Spectral distributions or multi-spectral distributions to plot. `sds` can be a single :class:`colour.MultiSpectralDistributions` class instance, a list of :class:`colour.MultiSpectralDistributions` class instances or a list of :class:`colour.SpectralDistribution` class instances. cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. 
``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. chromaticity_diagram_callable_CIE1931 Callable responsible for drawing the *CIE 1931 Chromaticity Diagram*. annotate_kwargs Keyword arguments for the :func:`matplotlib.pyplot.annotate` definition, used to annotate the resulting chromaticity coordinates with their respective spectral distribution names. ``annotate_kwargs`` can be either a single dictionary applied to all the arrows with same settings or a sequence of dictionaries with different settings for each spectral distribution. The following special keyword arguments can also be used: - ``annotate`` : Whether to annotate the spectral distributions. plot_kwargs Keyword arguments for the :func:`matplotlib.pyplot.plot` definition, used to control the style of the plotted spectral distributions. `plot_kwargs`` can be either a single dictionary applied to all the plotted spectral distributions with the same settings or a sequence of dictionaries with different settings for each plotted spectral distributions. The following special keyword arguments can also be used: - ``illuminant`` : The illuminant used to compute the spectral distributions colours. The default is the illuminant associated with the whitepoint of the default plotting colourspace. ``illuminant`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``cmfs`` : The standard observer colour matching functions used for computing the spectral distributions colours. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``normalise_sd_colours`` : Whether to normalise the computed spectral distributions colours. The default is *True*. - ``use_sd_colours`` : Whether to use the computed spectral distributions colours under the plotting colourspace illuminant. Alternatively, it is possible to use the :func:`matplotlib.pyplot.plot` definition ``color`` argument with pre-computed values. The default is *True*. Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> A = SDS_ILLUMINANTS['A'] >>> D65 = SDS_ILLUMINANTS['D65'] >>> plot_sds_in_chromaticity_diagram_CIE1931([A, D65]) ... # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. 
image:: ../_static/Plotting_\ Plot_SDS_In_Chromaticity_Diagram_CIE1931.png :align: center :alt: plot_sds_in_chromaticity_diagram_CIE1931 """ settings = dict(kwargs) settings.update({"method": "CIE 1931"}) return plot_sds_in_chromaticity_diagram( sds, cmfs, chromaticity_diagram_callable_CIE1931, annotate_kwargs=annotate_kwargs, plot_kwargs=plot_kwargs, **settings, ) @override_style() def plot_sds_in_chromaticity_diagram_CIE1960UCS( sds: Union[ Sequence[Union[SpectralDistribution, MultiSpectralDistributions]], MultiSpectralDistributions, ], cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", chromaticity_diagram_callable_CIE1960UCS: Callable = ( plot_chromaticity_diagram_CIE1960UCS ), annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None, plot_kwargs: Optional[Union[Dict, List[Dict]]] = None, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot given spectral distribution chromaticity coordinates into the *CIE 1960 UCS Chromaticity Diagram*. Parameters ---------- sds Spectral distributions or multi-spectral distributions to plot. `sds` can be a single :class:`colour.MultiSpectralDistributions` class instance, a list of :class:`colour.MultiSpectralDistributions` class instances or a list of :class:`colour.SpectralDistribution` class instances. cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. chromaticity_diagram_callable_CIE1960UCS Callable responsible for drawing the *CIE 1960 UCS Chromaticity Diagram*. annotate_kwargs Keyword arguments for the :func:`matplotlib.pyplot.annotate` definition, used to annotate the resulting chromaticity coordinates with their respective spectral distribution names. ``annotate_kwargs`` can be either a single dictionary applied to all the arrows with same settings or a sequence of dictionaries with different settings for each spectral distribution. The following special keyword arguments can also be used: - ``annotate`` : Whether to annotate the spectral distributions. plot_kwargs Keyword arguments for the :func:`matplotlib.pyplot.plot` definition, used to control the style of the plotted spectral distributions. `plot_kwargs`` can be either a single dictionary applied to all the plotted spectral distributions with the same settings or a sequence of dictionaries with different settings for each plotted spectral distributions. The following special keyword arguments can also be used: - ``illuminant`` : The illuminant used to compute the spectral distributions colours. The default is the illuminant associated with the whitepoint of the default plotting colourspace. ``illuminant`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``cmfs`` : The standard observer colour matching functions used for computing the spectral distributions colours. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``normalise_sd_colours`` : Whether to normalise the computed spectral distributions colours. The default is *True*. - ``use_sd_colours`` : Whether to use the computed spectral distributions colours under the plotting colourspace illuminant. Alternatively, it is possible to use the :func:`matplotlib.pyplot.plot` definition ``color`` argument with pre-computed values. The default is *True*. 
Other Parameters ---------------- kwargs {:func:`colour.plotting.artist`, :func:`colour.plotting.diagrams.plot_chromaticity_diagram`, :func:`colour.plotting.render`}, See the documentation of the previously listed definitions. Returns ------- :class:`tuple` Current figure and axes. Examples -------- >>> A = SDS_ILLUMINANTS['A'] >>> D65 = SDS_ILLUMINANTS['D65'] >>> plot_sds_in_chromaticity_diagram_CIE1960UCS([A, D65]) ... # doctest: +ELLIPSIS (<Figure size ... with 1 Axes>, <...AxesSubplot...>) .. image:: ../_static/Plotting_\ Plot_SDS_In_Chromaticity_Diagram_CIE1960UCS.png :align: center :alt: plot_sds_in_chromaticity_diagram_CIE1960UCS """ settings = dict(kwargs) settings.update({"method": "CIE 1960 UCS"}) return plot_sds_in_chromaticity_diagram( sds, cmfs, chromaticity_diagram_callable_CIE1960UCS, annotate_kwargs=annotate_kwargs, plot_kwargs=plot_kwargs, **settings, ) @override_style() def plot_sds_in_chromaticity_diagram_CIE1976UCS( sds: Union[ Sequence[Union[SpectralDistribution, MultiSpectralDistributions]], MultiSpectralDistributions, ], cmfs: Union[ MultiSpectralDistributions, str, Sequence[Union[MultiSpectralDistributions, str]], ] = "CIE 1931 2 Degree Standard Observer", chromaticity_diagram_callable_CIE1976UCS: Callable = ( plot_chromaticity_diagram_CIE1976UCS ), annotate_kwargs: Optional[Union[Dict, List[Dict]]] = None, plot_kwargs: Optional[Union[Dict, List[Dict]]] = None, **kwargs: Any, ) -> Tuple[plt.Figure, plt.Axes]: """ Plot given spectral distribution chromaticity coordinates into the *CIE 1976 UCS Chromaticity Diagram*. Parameters ---------- sds Spectral distributions or multi-spectral distributions to plot. `sds` can be a single :class:`colour.MultiSpectralDistributions` class instance, a list of :class:`colour.MultiSpectralDistributions` class instances or a list of :class:`colour.SpectralDistribution` class instances. cmfs Standard observer colour matching functions used for computing the spectral locus boundaries. ``cmfs`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. chromaticity_diagram_callable_CIE1976UCS Callable responsible for drawing the *CIE 1976 UCS Chromaticity Diagram*. annotate_kwargs Keyword arguments for the :func:`matplotlib.pyplot.annotate` definition, used to annotate the resulting chromaticity coordinates with their respective spectral distribution names. ``annotate_kwargs`` can be either a single dictionary applied to all the arrows with same settings or a sequence of dictionaries with different settings for each spectral distribution. The following special keyword arguments can also be used: - ``annotate`` : Whether to annotate the spectral distributions. plot_kwargs Keyword arguments for the :func:`matplotlib.pyplot.plot` definition, used to control the style of the plotted spectral distributions. `plot_kwargs`` can be either a single dictionary applied to all the plotted spectral distributions with the same settings or a sequence of dictionaries with different settings for each plotted spectral distributions. The following special keyword arguments can also be used: - ``illuminant`` : The illuminant used to compute the spectral distributions colours. The default is the illuminant associated with the whitepoint of the default plotting colourspace. ``illuminant`` can be of any type or form supported by the :func:`colour.plotting.filter_cmfs` definition. - ``cmfs`` : The standard observer colour matching functions used for computing the spectral distributions colours. 
            ``cmfs`` can be of any type or form supported by the
            :func:`colour.plotting.filter_cmfs` definition.
        -   ``normalise_sd_colours`` : Whether to normalise the computed
            spectral distributions colours. The default is *True*.
        -   ``use_sd_colours`` : Whether to use the computed spectral
            distributions colours under the plotting colourspace illuminant.
            Alternatively, it is possible to use the
            :func:`matplotlib.pyplot.plot` definition ``color`` argument with
            pre-computed values. The default is *True*.

    Other Parameters
    ----------------
    kwargs
        {:func:`colour.plotting.artist`,
        :func:`colour.plotting.diagrams.plot_chromaticity_diagram`,
        :func:`colour.plotting.render`},
        See the documentation of the previously listed definitions.

    Returns
    -------
    :class:`tuple`
        Current figure and axes.

    Examples
    --------
    >>> A = SDS_ILLUMINANTS['A']
    >>> D65 = SDS_ILLUMINANTS['D65']
    >>> plot_sds_in_chromaticity_diagram_CIE1976UCS([A, D65])
    ... # doctest: +ELLIPSIS
    (<Figure size ... with 1 Axes>, <...AxesSubplot...>)

    .. image:: ../_static/Plotting_\
Plot_SDS_In_Chromaticity_Diagram_CIE1976UCS.png
        :align: center
        :alt: plot_sds_in_chromaticity_diagram_CIE1976UCS
    """

    settings = dict(kwargs)
    settings.update({"method": "CIE 1976 UCS"})

    return plot_sds_in_chromaticity_diagram(
        sds,
        cmfs,
        chromaticity_diagram_callable_CIE1976UCS,
        annotate_kwargs=annotate_kwargs,
        plot_kwargs=plot_kwargs,
        **settings,
    )
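

# Usage sketch (illustrative only): assuming ``colour`` and ``matplotlib``
# are available and this module is run as a script, the definitions above can
# be exercised as follows, mirroring the doctest examples. The argument
# values below are arbitrary demonstration choices; ``SDS_ILLUMINANTS`` is
# the illuminant spectral distribution mapping already referenced above.
if __name__ == "__main__":
    # Plain CIE 1931 Chromaticity Diagram with background colours and
    # spectral locus.
    plot_chromaticity_diagram_CIE1931()

    # Illuminants A and D65 plotted in the CIE 1976 UCS Chromaticity Diagram,
    # with the annotation arrows offset as in the docstring example.
    plot_sds_in_chromaticity_diagram_CIE1976UCS(
        [SDS_ILLUMINANTS["A"], SDS_ILLUMINANTS["D65"]],
        annotate_kwargs={"xytext": (-25, 15)},
    )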