code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def set_defaults(self):
    """Fill every unset field that declares a default with that default value."""
    pending = {}
    for index in range(len(self)):
        # Fields already carrying a value are left untouched.
        if index in self._data:
            continue
        descriptor = self.get_field_descriptor(index)
        # A missing "default" tag yields None, which means "no default defined".
        default_value = descriptor.tags.get("default", [None])[0]
        if default_value is not None:
            pending[index] = default_value
    self.update(pending)
sets all empty fields for which a default value is defined to default value
Below is the the instruction that describes the task: ### Input: sets all empty fields for which a default value is defined to default value ### Response: def set_defaults(self): """ sets all empty fields for which a default value is defined to default value """ defaults = {} for i in range(len(self)): if i in self._data: continue default = self.get_field_descriptor(i).tags.get("default", [None])[0] if default is not None: defaults[i] = default self.update(defaults)
def _check_hint_bounds(self, ds):
    '''
    Checks for variables ending with _bounds, if they are not cell methods,
    make the recommendation

    :param netCDF4.Dataset ds: An open netCDF dataset
    :rtype: list
    :return: List of results
    '''
    results = []
    # Variables already referenced by a `bounds` attribute are fine.
    known_boundaries = cfutil.get_cell_boundary_variables(ds)
    for var_name in ds.variables:
        if not var_name.endswith('_bounds'):
            continue
        if var_name in known_boundaries:
            continue
        msg = ('{} might be a cell boundary variable but there are no variables that define it '
               'as a boundary using the `bounds` attribute.'.format(var_name))
        results.append(
            Result(BaseCheck.LOW, True, self.section_titles['7.1'], [msg]))
    return results
Checks for variables ending with _bounds, if they are not cell methods, make the recommendation :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results
Below is the the instruction that describes the task: ### Input: Checks for variables ending with _bounds, if they are not cell methods, make the recommendation :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ### Response: def _check_hint_bounds(self, ds): ''' Checks for variables ending with _bounds, if they are not cell methods, make the recommendation :param netCDF4.Dataset ds: An open netCDF dataset :rtype: list :return: List of results ''' ret_val = [] boundary_variables = cfutil.get_cell_boundary_variables(ds) for name in ds.variables: if name.endswith('_bounds') and name not in boundary_variables: msg = ('{} might be a cell boundary variable but there are no variables that define it ' 'as a boundary using the `bounds` attribute.'.format(name)) result = Result(BaseCheck.LOW, True, self.section_titles['7.1'], [msg]) ret_val.append(result) return ret_val
def option(self, opt):
    '''
    Return options merged from config and pillar
    '''
    # Fall back to the raw opts dict when the merge function is unavailable.
    if 'config.merge' not in self.functions:
        return self.opts.get(opt, {})
    return self.functions['config.merge'](opt, {}, omit_master=True)
Return options merged from config and pillar
Below is the instruction that describes the task: ### Input: Return options merged from config and pillar ### Response: def option(self, opt): ''' Return options merged from config and pillar ''' if 'config.merge' in self.functions: return self.functions['config.merge'](opt, {}, omit_master=True) return self.opts.get(opt, {})
def laid_out_pcoord(self, mesh_axis):
    """Returns a LaidOutTensor containing the processor coordinate.

    Args:
      mesh_axis: int.

    Returns:
      LaidOutTensor where each slice is an integer scalar.
    """
    # Product of the mesh dimensions that vary faster than `mesh_axis`.
    trailing_sizes = self.shape.to_integer_list[mesh_axis + 1:]
    divisor = list_product(trailing_sizes)
    modulus = self.shape[mesh_axis].size

    def pnum_to_coord(pnum):
        # Strip the faster-varying axes, then wrap to this axis' extent.
        return (pnum // divisor) % modulus

    return self.slicewise(pnum_to_coord, self.laid_out_pnum())
Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar.
Below is the the instruction that describes the task: ### Input: Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar. ### Response: def laid_out_pcoord(self, mesh_axis): """Returns a LaidOutTensor containing the processor coordinate. Args: mesh_axis: int. Returns: LaidOutTensor where each slice is an integer scalar. """ divisor = list_product(self.shape.to_integer_list[mesh_axis + 1:]) modulus = self.shape[mesh_axis].size def my_fn(pnum): return (pnum // divisor) % modulus return self.slicewise(my_fn, self.laid_out_pnum())
def write_as_dot(self, f, data=None, max_levels=None):
    "Write the tree in the dot language format to f."
    # NOTE(review): uses Python 2 `xrange` and a module-level `max_k_labeled`
    # constant defined elsewhere in the file; `self.k` is the heap arity.
    # `f` is any writable text stream; `data`, when given, is indexable block
    # storage rendered inside each bucket; `max_levels` limits recursion depth
    # (None = unlimited).
    assert (max_levels is None) or (max_levels >= 0)
    def visit_node(n, levels):
        # Build the dot "record" label for this node's bucket.
        lbl = "{"
        if data is None:
            if self.k <= max_k_labeled:
                # Escape characters that are significant in dot record labels.
                lbl = repr(n.label()).\
                    replace("{","\{").\
                    replace("}","\}").\
                    replace("|","\|").\
                    replace("<","\<").\
                    replace(">","\>")
            else:
                lbl = str(n)
        else:
            # Render the raw blocks stored in this bucket, one record cell
            # per block, separated by "|".
            s = self.bucket_to_block(n.bucket)
            for i in xrange(self.blocks_per_bucket):
                lbl += "{%s}" % (data[s+i])
                if i + 1 != self.blocks_per_bucket:
                    lbl += "|"
            lbl += "}"
        f.write(" %s [penwidth=%s,label=\"%s\"];\n"
                % (n.bucket, 1, lbl))
        levels += 1
        if (max_levels is None) or (levels <= max_levels):
            for i in xrange(self.k):
                cn = n.child_node(i)
                if not self.is_nil_node(cn):
                    # Recurse into the child, then emit the parent->child edge.
                    visit_node(cn, levels)
                    f.write(" %s -> %s ;\n"
                            % (n.bucket, cn.bucket))
    f.write("// Created by SizedVirtualHeap.write_as_dot(...)\n")
    f.write("digraph heaptree {\n")
    f.write("node [shape=record]\n")
    if (max_levels is None) or (max_levels > 0):
        visit_node(self.root_node(), 1)
    f.write("}\n")
Write the tree in the dot language format to f.
Below is the the instruction that describes the task: ### Input: Write the tree in the dot language format to f. ### Response: def write_as_dot(self, f, data=None, max_levels=None): "Write the tree in the dot language format to f." assert (max_levels is None) or (max_levels >= 0) def visit_node(n, levels): lbl = "{" if data is None: if self.k <= max_k_labeled: lbl = repr(n.label()).\ replace("{","\{").\ replace("}","\}").\ replace("|","\|").\ replace("<","\<").\ replace(">","\>") else: lbl = str(n) else: s = self.bucket_to_block(n.bucket) for i in xrange(self.blocks_per_bucket): lbl += "{%s}" % (data[s+i]) if i + 1 != self.blocks_per_bucket: lbl += "|" lbl += "}" f.write(" %s [penwidth=%s,label=\"%s\"];\n" % (n.bucket, 1, lbl)) levels += 1 if (max_levels is None) or (levels <= max_levels): for i in xrange(self.k): cn = n.child_node(i) if not self.is_nil_node(cn): visit_node(cn, levels) f.write(" %s -> %s ;\n" % (n.bucket, cn.bucket)) f.write("// Created by SizedVirtualHeap.write_as_dot(...)\n") f.write("digraph heaptree {\n") f.write("node [shape=record]\n") if (max_levels is None) or (max_levels > 0): visit_node(self.root_node(), 1) f.write("}\n")
def getOutputElementCount(self, name):
    """ Returns the size of the output array """
    # All recognized outputs share the same flattened size.
    recognized = ["activeCells", "learnableCells", "sensoryAssociatedCells"]
    if name not in recognized:
        raise Exception("Invalid output name specified: " + name)
    return self.cellCount * self.moduleCount
Returns the size of the output array
Below is the the instruction that describes the task: ### Input: Returns the size of the output array ### Response: def getOutputElementCount(self, name): """ Returns the size of the output array """ if name in ["activeCells", "learnableCells", "sensoryAssociatedCells"]: return self.cellCount * self.moduleCount else: raise Exception("Invalid output name specified: " + name)
def get_hosting_device_plugging_driver(self, context, id):
    """Returns plugging driver for hosting device template with <id>."""
    if id is None:
        return
    # Fast path: driver already cached for this template.
    try:
        return self._plugging_drivers[id]
    except KeyError:
        pass
    # Cache miss: look up the template and import its plugging driver.
    try:
        template = self._get_hosting_device_template(context, id)
        driver = importutils.import_object(template['plugging_driver'])
        self._plugging_drivers[id] = driver
    except (ImportError, TypeError, n_exc.NeutronException):
        LOG.exception("Error loading plugging driver for hosting "
                      "device template %s", id)
    # May still be absent if the import failed; .get() then yields None.
    return self._plugging_drivers.get(id)
Returns plugging driver for hosting device template with <id>.
Below is the the instruction that describes the task: ### Input: Returns plugging driver for hosting device template with <id>. ### Response: def get_hosting_device_plugging_driver(self, context, id): """Returns plugging driver for hosting device template with <id>.""" if id is None: return try: return self._plugging_drivers[id] except KeyError: try: template = self._get_hosting_device_template(context, id) self._plugging_drivers[id] = importutils.import_object( template['plugging_driver']) except (ImportError, TypeError, n_exc.NeutronException): LOG.exception("Error loading plugging driver for hosting " "device template %s", id) return self._plugging_drivers.get(id)
def is_fqdn(hostname):
    """
    Verify if hostname conforms to be a FQDN.

    :param hostname: text string with the name of the host
    :return: bool, True if hostname is correct FQDN, False otherwise
    """
    # Must contain at least one dot and stay under 255 characters overall.
    if "." not in hostname or len(hostname) >= 0xff:
        return False
    # Each label: 1-63 chars of letters/digits/hyphen/underscore,
    # not starting or ending with a hyphen.
    label_re = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE)
    labels = hostname.rstrip(".").split(".")
    return all(label_re.match(label) for label in labels)
Verify if hostname conforms to be a FQDN. :param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise
Below is the the instruction that describes the task: ### Input: Verify if hostname conforms to be a FQDN. :param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise ### Response: def is_fqdn(hostname): """ Verify if hostname conforms to be a FQDN. :param hostname: text string with the name of the host :return: bool, True if hostname is correct FQDN, False otherwise """ compliant = re.compile(r"(?!-)[A-Z\d\-\_]{1,63}(?<!-)$", re.IGNORECASE) return "." in hostname and len(hostname) < 0xff and all(compliant.match(x) for x in hostname.rstrip(".").split("."))
def _rebuild_all_command_chains(self): """ Rebuilds execution chain for all registered commands. This method is typically called when intercepters are changed. Because of that it is more efficient to register intercepters before registering commands (typically it will be done in abstract classes). However, that performance penalty will be only once during creation time. """ self._commands_by_name = {} for command in self._commands: self._build_command_chain(command)
Rebuilds execution chain for all registered commands. This method is typically called when intercepters are changed. Because of that it is more efficient to register intercepters before registering commands (typically it will be done in abstract classes). However, that performance penalty will be only once during creation time.
Below is the the instruction that describes the task: ### Input: Rebuilds execution chain for all registered commands. This method is typically called when intercepters are changed. Because of that it is more efficient to register intercepters before registering commands (typically it will be done in abstract classes). However, that performance penalty will be only once during creation time. ### Response: def _rebuild_all_command_chains(self): """ Rebuilds execution chain for all registered commands. This method is typically called when intercepters are changed. Because of that it is more efficient to register intercepters before registering commands (typically it will be done in abstract classes). However, that performance penalty will be only once during creation time. """ self._commands_by_name = {} for command in self._commands: self._build_command_chain(command)
def call_actions_parallel(self, service_name, actions, **kwargs):
    """
    Build and send multiple job requests to one service, each job with one action, to be executed in
    parallel, and return once all responses have been received.

    Returns a list of action responses, one for each action in the same order as provided, or raises an
    exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or
    if any job response is an error (unless `raise_job_errors` is passed as `False`).

    This method performs expansions if the Client is configured with an expansion converter.

    :param service_name: The name of the service to call
    :type service_name: union[str, unicode]
    :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to
                    `ActionRequest` objects
    :type actions: iterable[union[ActionRequest, dict]]
    :param expansions: A dictionary representing the expansions to perform
    :type expansions: dict
    :param raise_action_errors: Whether to raise a CallActionError if any action responses contain
                                errors (defaults to `True`)
    :type raise_action_errors: bool
    :param timeout: If provided, this will override the default transport timeout values to; requests
                    will expire after this number of seconds plus some buffer defined by the transport,
                    and the client will not block waiting for a response for longer than this amount of
                    time.
    :type timeout: int
    :param switches: A list of switch value integers
    :type switches: list
    :param correlation_id: The request correlation ID
    :type correlation_id: union[str, unicode]
    :param continue_on_error: Whether to continue executing further actions once one action has
                              returned errors
    :type continue_on_error: bool
    :param context: A dictionary of extra values to include in the context header
    :type context: dict
    :param control_extra: A dictionary of extra values to include in the control header
    :type control_extra: dict
    :return: A generator of action responses
    :rtype: Generator[ActionResponse]
    :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge,
            MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError

    NOTE(review): the summary says this returns "a list of action responses" while :rtype: says
    Generator — confirm what `call_actions_parallel_future(...).result()` actually returns and align
    the two statements.
    """
    # Delegate to the future-based variant and block until all parallel jobs complete.
    return self.call_actions_parallel_future(service_name, actions, **kwargs).result()
Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. :param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. 
:type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError
Below is the the instruction that describes the task: ### Input: Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. :param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. 
:type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError ### Response: def call_actions_parallel(self, service_name, actions, **kwargs): """ Build and send multiple job requests to one service, each job with one action, to be executed in parallel, and return once all responses have been received. Returns a list of action responses, one for each action in the same order as provided, or raises an exception if any action response is an error (unless `raise_action_errors` is passed as `False`) or if any job response is an error (unless `raise_job_errors` is passed as `False`). This method performs expansions if the Client is configured with an expansion converter. 
:param service_name: The name of the service to call :type service_name: union[str, unicode] :param actions: A list of `ActionRequest` objects and/or dicts that can be converted to `ActionRequest` objects :type actions: iterable[union[ActionRequest, dict]] :param expansions: A dictionary representing the expansions to perform :type expansions: dict :param raise_action_errors: Whether to raise a CallActionError if any action responses contain errors (defaults to `True`) :type raise_action_errors: bool :param timeout: If provided, this will override the default transport timeout values to; requests will expire after this number of seconds plus some buffer defined by the transport, and the client will not block waiting for a response for longer than this amount of time. :type timeout: int :param switches: A list of switch value integers :type switches: list :param correlation_id: The request correlation ID :type correlation_id: union[str, unicode] :param continue_on_error: Whether to continue executing further actions once one action has returned errors :type continue_on_error: bool :param context: A dictionary of extra values to include in the context header :type context: dict :param control_extra: A dictionary of extra values to include in the control header :type control_extra: dict :return: A generator of action responses :rtype: Generator[ActionResponse] :raise: ConnectionError, InvalidField, MessageSendError, MessageSendTimeout, MessageTooLarge, MessageReceiveError, MessageReceiveTimeout, InvalidMessage, JobError, CallActionError """ return self.call_actions_parallel_future(service_name, actions, **kwargs).result()
def stop(self):
    """
    Set the Event lock, this will break all threads' loops.
    """
    self.running = False
    # Best-effort shutdown of the command server; swallow any failure.
    try:
        self.commands_thread.kill()
    except:  # noqa e722
        pass
    try:
        self.lock.set()
        if self.config["debug"]:
            self.log("lock set, exiting")
        # Give every py3status module a chance to clean up.
        for py3_module in self.modules.values():
            py3_module.kill()
    except:  # noqa e722
        pass
Set the Event lock, this will break all threads' loops.
Below is the the instruction that describes the task: ### Input: Set the Event lock, this will break all threads' loops. ### Response: def stop(self): """ Set the Event lock, this will break all threads' loops. """ self.running = False # stop the command server try: self.commands_thread.kill() except: # noqa e722 pass try: self.lock.set() if self.config["debug"]: self.log("lock set, exiting") # run kill() method on all py3status modules for module in self.modules.values(): module.kill() except: # noqa e722 pass
def get_feature_type(feature_column):
    """
    For a given feature, determine if it is real, binary or constant.
    Here binary means that only two unique values occur in the feature.

    :param feature_column: The feature column
    :type feature_column: pandas.Series
    :return: 'constant', 'binary' or 'real'
    """
    distinct_values = set(feature_column.values)
    if len(distinct_values) == 1:
        # A constant feature carries no information; warn so the caller notices.
        _logger.warning("[test_feature_significance] Feature {} is constant".format(feature_column.name))
        return 'constant'
    if len(distinct_values) == 2:
        return 'binary'
    return 'real'
For a given feature, determine if it is real, binary or constant. Here binary means that only two unique values occur in the feature. :param feature_column: The feature column :type feature_column: pandas.Series :return: 'constant', 'binary' or 'real'
Below is the the instruction that describes the task: ### Input: For a given feature, determine if it is real, binary or constant. Here binary means that only two unique values occur in the feature. :param feature_column: The feature column :type feature_column: pandas.Series :return: 'constant', 'binary' or 'real' ### Response: def get_feature_type(feature_column): """ For a given feature, determine if it is real, binary or constant. Here binary means that only two unique values occur in the feature. :param feature_column: The feature column :type feature_column: pandas.Series :return: 'constant', 'binary' or 'real' """ n_unique_values = len(set(feature_column.values)) if n_unique_values == 1: _logger.warning("[test_feature_significance] Feature {} is constant".format(feature_column.name)) return 'constant' elif n_unique_values == 2: return 'binary' else: return 'real'
def run_simulations(self, parameter_list, data_folder):
    """
    This function runs multiple simulations in parallel.

    Args:
        parameter_list (list): list of parameter combinations to simulate.
        data_folder (str): folder in which to create output folders.
    """
    self.data_folder = data_folder
    with Pool(processes=MAX_PARALLEL_PROCESSES) as pool:
        # imap_unordered yields each simulation result as soon as it finishes,
        # regardless of submission order.
        result_iterator = pool.imap_unordered(self.launch_simulation,
                                              parameter_list)
        for outcome in result_iterator:
            yield outcome
This function runs multiple simulations in parallel. Args: parameter_list (list): list of parameter combinations to simulate. data_folder (str): folder in which to create output folders.
Below is the the instruction that describes the task: ### Input: This function runs multiple simulations in parallel. Args: parameter_list (list): list of parameter combinations to simulate. data_folder (str): folder in which to create output folders. ### Response: def run_simulations(self, parameter_list, data_folder): """ This function runs multiple simulations in parallel. Args: parameter_list (list): list of parameter combinations to simulate. data_folder (str): folder in which to create output folders. """ self.data_folder = data_folder with Pool(processes=MAX_PARALLEL_PROCESSES) as pool: for result in pool.imap_unordered(self.launch_simulation, parameter_list): yield result
def run(self, name, goea_nts, log):
    """Run gene product ASCII art."""
    report = AArtGeneProductSetsOne(name, goea_nts, self)
    # Grouped report only when the header object defines sections.
    if not self.hdrobj.sections:
        return report.prt_report_grp0(log)
    return report.prt_report_grp1(log)
Run gene product ASCII art.
Below is the instruction that describes the task: ### Input: Run gene product ASCII art. ### Response: def run(self, name, goea_nts, log): """Run gene product ASCII art.""" objaart = AArtGeneProductSetsOne(name, goea_nts, self) if self.hdrobj.sections: return objaart.prt_report_grp1(log) else: return objaart.prt_report_grp0(log)
def CopyToDatetime(cls, timestamp, timezone, raise_error=False):
    """Copies the timestamp to a datetime object.

    Args:
      timestamp: The timestamp which is an integer containing the number
                 of micro seconds since January 1, 1970, 00:00:00 UTC.
      timezone: The timezone (pytz.timezone) object.
      raise_error: Boolean that if set to True will not absorb an
                   OverflowError if the timestamp is out of bounds.
                   By default there will be no error raised.

    Returns:
      A datetime object (instance of datetime.datetime). A datetime object
      of January 1, 1970 00:00:00 UTC is returned on error if raises_error
      is not set.

    Raises:
      OverflowError: If raises_error is set to True and an overflow error
                     occurs.
      ValueError: If raises_error is set to True and no timestamp value is
                  provided.
    """
    # The POSIX epoch doubles as the fallback value on every error path.
    epoch = datetime.datetime(1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
    if not timestamp:
        if raise_error:
            raise ValueError('Missing timestamp value')
        return epoch
    try:
        result = epoch + datetime.timedelta(microseconds=timestamp)
        return result.astimezone(timezone)
    except OverflowError as exception:
        if raise_error:
            raise
        logging.error((
            'Unable to copy {0:d} to a datetime object with error: '
            '{1!s}').format(timestamp, exception))
    return epoch
def tempdeny(ip=None, ttl=None, port=None, direction=None, comment=''):
    '''
    Add a rule to the temporary ip deny list.

    See :func:`_access_rule`.

    1- Add an IP:

    CLI Example:

    .. code-block:: bash

        salt '*' csf.tempdeny 127.0.0.1 300 port=22 direction='in' comment='# Brute force attempt'
    '''
    # Delegate to the shared temporary-rule helper with method "tempdeny";
    # ttl is the lifetime of the rule in seconds.
    return _tmp_access_rule('tempdeny', ip, ttl, port, direction, comment)
Add a rule to the temporary ip deny list. See :func:`_access_rule`. 1- Add an IP: CLI Example: .. code-block:: bash salt '*' csf.tempdeny 127.0.0.1 300 port=22 direction='in' comment='# Brute force attempt'
Below is the the instruction that describes the task: ### Input: Add a rule to the temporary ip deny list. See :func:`_access_rule`. 1- Add an IP: CLI Example: .. code-block:: bash salt '*' csf.tempdeny 127.0.0.1 300 port=22 direction='in' comment='# Brute force attempt' ### Response: def tempdeny(ip=None, ttl=None, port=None, direction=None, comment=''): ''' Add a rule to the temporary ip deny list. See :func:`_access_rule`. 1- Add an IP: CLI Example: .. code-block:: bash salt '*' csf.tempdeny 127.0.0.1 300 port=22 direction='in' comment='# Brute force attempt' ''' return _tmp_access_rule('tempdeny', ip, ttl, port, direction, comment)
def main():
    """Main entry point"""
    # (flags, kwargs) pairs, registered in display order for --help output.
    option_specs = [
        (('-a', '--hostname'),
         dict(help='ClamAV source server hostname', dest='hostname',
              type='str', default='db.de.clamav.net')),
        (('-r', '--text-record'),
         dict(help='ClamAV Updates TXT record', dest='txtrecord',
              type='str', default='current.cvd.clamav.net')),
        (('-w', '--work-directory'),
         dict(help='Working directory', dest='workdir',
              type='str', default='/var/spool/clamav-mirror')),
        (('-d', '--mirror-directory'),
         dict(help='The mirror directory', dest='mirrordir',
              type='str', default='/srv/www/clamav')),
        (('-u', '--user'),
         dict(help='Change file owner to this user', dest='user',
              type='str', default='nginx')),
        (('-g', '--group'),
         dict(help='Change file group to this group', dest='group',
              type='str', default='nginx')),
        (('-l', '--locks-directory'),
         dict(help='Lock files directory', dest='lockdir',
              type='str', default='/var/lock/subsys')),
        (('-v', '--verbose'),
         dict(help='Display verbose output', dest='verbose',
              action='store_true', default=False)),
    ]
    parser = OptionParser()
    for flags, params in option_specs:
        parser.add_option(*flags, **params)
    options, _ = parser.parse_args()
    try:
        # An exclusive, non-blocking lock guarantees a single running instance.
        lockfile = os.path.join(options.lockdir, 'clamavmirror')
        with open(lockfile, 'w+') as lock:
            fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB)
            work(options)
    except IOError:
        info("=> Another instance is already running")
        sys.exit(254)
Main entry point
Below is the the instruction that describes the task: ### Input: Main entry point ### Response: def main(): """Main entry point""" parser = OptionParser() parser.add_option('-a', '--hostname', help='ClamAV source server hostname', dest='hostname', type='str', default='db.de.clamav.net') parser.add_option('-r', '--text-record', help='ClamAV Updates TXT record', dest='txtrecord', type='str', default='current.cvd.clamav.net') parser.add_option('-w', '--work-directory', help='Working directory', dest='workdir', type='str', default='/var/spool/clamav-mirror') parser.add_option('-d', '--mirror-directory', help='The mirror directory', dest='mirrordir', type='str', default='/srv/www/clamav') parser.add_option('-u', '--user', help='Change file owner to this user', dest='user', type='str', default='nginx') parser.add_option('-g', '--group', help='Change file group to this group', dest='group', type='str', default='nginx') parser.add_option('-l', '--locks-directory', help='Lock files directory', dest='lockdir', type='str', default='/var/lock/subsys') parser.add_option('-v', '--verbose', help='Display verbose output', dest='verbose', action='store_true', default=False) options, _ = parser.parse_args() try: lockfile = os.path.join(options.lockdir, 'clamavmirror') with open(lockfile, 'w+') as lock: fcntl.lockf(lock, fcntl.LOCK_EX | fcntl.LOCK_NB) work(options) except IOError: info("=> Another instance is already running") sys.exit(254)
def monitor(self, listener): """Relay the stream to listener until told to stop. """ for line in self._stream(): self._record.append(line) if self.verbose: self.out.blather(line) if listener(line) is self.MONITOR_STOP: return
Relay the stream to listener until told to stop.
Below is the the instruction that describes the task: ### Input: Relay the stream to listener until told to stop. ### Response: def monitor(self, listener): """Relay the stream to listener until told to stop. """ for line in self._stream(): self._record.append(line) if self.verbose: self.out.blather(line) if listener(line) is self.MONITOR_STOP: return
def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
Rescale the image by scaling the smaller spatial dimension to `size`.
Below is the the instruction that describes the task: ### Input: Rescale the image by scaling the smaller spatial dimension to `size`. ### Response: def _do_scale(image, size): """Rescale the image by scaling the smaller spatial dimension to `size`.""" shape = tf.cast(tf.shape(image), tf.float32) w_greater = tf.greater(shape[0], shape[1]) shape = tf.cond(w_greater, lambda: tf.cast([shape[0] / shape[1] * size, size], tf.int32), lambda: tf.cast([size, shape[1] / shape[0] * size], tf.int32)) return tf.image.resize_bicubic([image], shape)[0]
def cli(ctx, amount, index, stage): """Pull, Transform, Push,streaming inside a pipe(experimental).""" ctx.obj.say_green('Starting Streaming Pipe') res_pull = ctx.invoke(pull, amount=amount, index=index, stage=stage) res_tra = False if res_pull: # amount to transform can be less (or more) res_tra = ctx.invoke( transform, amount=amount, index=index, stage=stage) if res_tra: # amount to push can be less (or more) res_push = ctx.invoke(push, amount=amount, index=index, stage=stage) if res_pull and res_tra and res_push: ctx.obj.say_green('Streaming Pipe finsished') return True return False
Pull, Transform, Push,streaming inside a pipe(experimental).
Below is the the instruction that describes the task: ### Input: Pull, Transform, Push,streaming inside a pipe(experimental). ### Response: def cli(ctx, amount, index, stage): """Pull, Transform, Push,streaming inside a pipe(experimental).""" ctx.obj.say_green('Starting Streaming Pipe') res_pull = ctx.invoke(pull, amount=amount, index=index, stage=stage) res_tra = False if res_pull: # amount to transform can be less (or more) res_tra = ctx.invoke( transform, amount=amount, index=index, stage=stage) if res_tra: # amount to push can be less (or more) res_push = ctx.invoke(push, amount=amount, index=index, stage=stage) if res_pull and res_tra and res_push: ctx.obj.say_green('Streaming Pipe finsished') return True return False
def _load_ini_based_io(path, recursive=False, ini=None, subini={}, include_core=True, only_coefficients=False): """ DEPRECATED: For convert a previous version to the new json format Loads a IOSystem or Extension from a ini files This function can be used to load a IOSystem or Extension specified in a ini file. DataFrames (tables) are loaded from text or binary pickle files. For the latter, the extension .pkl or .pickle is assumed, in all other case the tables are assumed to be in .txt format. Parameters ---------- path : string path or ini file name for the data to load recursive : boolean, optional If True, load also the data in the subfolders and add them as extensions to the IOSystem (in that case path must point to the root). Only first order subfolders are considered (no subfolders in subfolders) and if a folder does not contain a ini file it's skipped. Use the subini parameter in case of multiple ini files in a subfolder. Attribute name of the extension in the IOSystem are based on the subfolder name. Default is False ini : string, optional If there are several ini files in the root folder, take this one for loading the data If None (default) take the ini found in the folder, error if several are found subini : dict, optional If there are multiple ini in the subfolder, use the ini given in the dict. Format: 'subfoldername':'ininame' If a key for a subfolder is not found or None (default), the ini found in the folder will be taken, error if several are found include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. 
Returns ------- IOSystem or Extension class depending on systemtype in the ini file None in case of errors """ # check path and given parameter ini_file_name = None path = os.path.abspath(os.path.normpath(path)) if os.path.splitext(path)[1] == '.ini': (path, ini_file_name) = os.path.split(path) if ini: ini_file_name = ini if not os.path.exists(path): raise ReadError('Given path does not exist') return None if not ini_file_name: _inifound = False for file in os.listdir(path): if os.path.splitext(file)[1] == '.ini': if _inifound: raise ReadError( 'Found multiple ini files in folder - specify one') return None ini_file_name = file _inifound = True # read the ini io_ini = configparser.RawConfigParser() io_ini.optionxform = lambda option: option io_ini.read(os.path.join(path, ini_file_name)) systemtype = io_ini.get('systemtype', 'systemtype', fallback=None) name = io_ini.get('meta', 'name', fallback=os.path.splitext(ini_file_name)[0]) if systemtype == 'IOSystem': ret_system = IOSystem(name=name) elif systemtype == 'Extension': ret_system = Extension(name=name) else: raise ReadError('System not defined in ini') return None for key in io_ini['meta']: setattr(ret_system, key, io_ini.get('meta', key, fallback=None)) for key in io_ini['files']: if '_nr_index_col' in key: continue if '_nr_header' in key: continue if not include_core: not_to_load = ['A', 'L', 'Z'] if key in not_to_load: continue if only_coefficients: _io = IOSystem() if key not in _io.__coefficients__ + ['unit']: continue file_name = io_ini.get('files', key) nr_index_col = io_ini.get( 'files', key + '_nr_index_col', fallback=None) nr_header = io_ini.get('files', key + '_nr_header', fallback=None) if (nr_index_col is None) or (nr_header is None): raise ReadError( 'Index or column specification missing for {}'. 
format(str(file_name))) return None _index_col = list(range(int(nr_index_col))) _header = list(range(int(nr_header))) if _index_col == [0]: _index_col = 0 if _header == [0]: _header = 0 file = os.path.join(path, file_name) logging.info('Load data from {}'.format(file)) if (os.path.splitext(file)[1] == '.pkl' or os.path.splitext(file)[1] == '.pickle'): setattr(ret_system, key, pd.read_pickle(file)) else: setattr(ret_system, key, pd.read_table(file, index_col=_index_col, header=_header)) if recursive: # look for subfolder in the given path subfolder_list = os.walk(path).__next__()[1] # loop all subfolder and append extension based on # ini file in subfolder for subfolder in subfolder_list: subini_file_name = subini.get(subfolder) subpath = os.path.abspath(os.path.join(path, subfolder)) if not subini_file_name: _inifound = False for file in os.listdir(subpath): if os.path.splitext(file)[1] == '.ini': if _inifound: raise ReadError( 'Found multiple ini files in subfolder ' '{} - specify one'.format(subpath)) return None subini_file_name = file _inifound = True if not _inifound: continue # read the ini subio_ini = configparser.RawConfigParser() subio_ini.optionxform = lambda option: option subio_ini.read(os.path.join(subpath, subini_file_name)) systemtype = subio_ini.get('systemtype', 'systemtype', fallback=None) name = subio_ini.get('meta', 'name', fallback=os.path.splitext( subini_file_name)[0]) if systemtype == 'IOSystem': raise ReadError('IOSystem found in subfolder {} - ' 'only extensions expected'.format(subpath)) return None elif systemtype == 'Extension': sub_system = Extension(name=name) else: raise ReadError('System not defined in ini') return None for key in subio_ini['meta']: setattr(sub_system, key, subio_ini.get('meta', key, fallback=None)) for key in subio_ini['files']: if '_nr_index_col' in key: continue if '_nr_header' in key: continue if only_coefficients: _ext = Extension('temp') if key not in _ext.__coefficients__ + ['unit']: continue file_name = 
subio_ini.get('files', key) nr_index_col = subio_ini.get('files', key + '_nr_index_col', fallback=None) nr_header = subio_ini.get('files', key + '_nr_header', fallback=None) if (nr_index_col is None) or (nr_header is None): raise ReadError('Index or column specification missing ' 'for {}'.format(str(file_name))) return None _index_col = list(range(int(nr_index_col))) _header = list(range(int(nr_header))) if _index_col == [0]: _index_col = 0 if _header == [0]: _header = 0 file = os.path.join(subpath, file_name) logging.info('Load data from {}'.format(file)) if (os.path.splitext(file)[1] == '.pkl' or os.path.splitext(file)[1] == '.pickle'): setattr(sub_system, key, pd.read_pickle(file)) else: setattr(sub_system, key, pd.read_table(file, index_col=_index_col, header=_header)) # get valid python name from folder def clean(varStr): return re.sub('\W|^(?=\d)', '_', str(varStr)) setattr(ret_system, clean(subfolder), sub_system) return ret_system
DEPRECATED: For convert a previous version to the new json format Loads a IOSystem or Extension from a ini files This function can be used to load a IOSystem or Extension specified in a ini file. DataFrames (tables) are loaded from text or binary pickle files. For the latter, the extension .pkl or .pickle is assumed, in all other case the tables are assumed to be in .txt format. Parameters ---------- path : string path or ini file name for the data to load recursive : boolean, optional If True, load also the data in the subfolders and add them as extensions to the IOSystem (in that case path must point to the root). Only first order subfolders are considered (no subfolders in subfolders) and if a folder does not contain a ini file it's skipped. Use the subini parameter in case of multiple ini files in a subfolder. Attribute name of the extension in the IOSystem are based on the subfolder name. Default is False ini : string, optional If there are several ini files in the root folder, take this one for loading the data If None (default) take the ini found in the folder, error if several are found subini : dict, optional If there are multiple ini in the subfolder, use the ini given in the dict. Format: 'subfoldername':'ininame' If a key for a subfolder is not found or None (default), the ini found in the folder will be taken, error if several are found include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. Returns ------- IOSystem or Extension class depending on systemtype in the ini file None in case of errors
Below is the the instruction that describes the task: ### Input: DEPRECATED: For convert a previous version to the new json format Loads a IOSystem or Extension from a ini files This function can be used to load a IOSystem or Extension specified in a ini file. DataFrames (tables) are loaded from text or binary pickle files. For the latter, the extension .pkl or .pickle is assumed, in all other case the tables are assumed to be in .txt format. Parameters ---------- path : string path or ini file name for the data to load recursive : boolean, optional If True, load also the data in the subfolders and add them as extensions to the IOSystem (in that case path must point to the root). Only first order subfolders are considered (no subfolders in subfolders) and if a folder does not contain a ini file it's skipped. Use the subini parameter in case of multiple ini files in a subfolder. Attribute name of the extension in the IOSystem are based on the subfolder name. Default is False ini : string, optional If there are several ini files in the root folder, take this one for loading the data If None (default) take the ini found in the folder, error if several are found subini : dict, optional If there are multiple ini in the subfolder, use the ini given in the dict. Format: 'subfoldername':'ininame' If a key for a subfolder is not found or None (default), the ini found in the folder will be taken, error if several are found include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. 
Returns ------- IOSystem or Extension class depending on systemtype in the ini file None in case of errors ### Response: def _load_ini_based_io(path, recursive=False, ini=None, subini={}, include_core=True, only_coefficients=False): """ DEPRECATED: For convert a previous version to the new json format Loads a IOSystem or Extension from a ini files This function can be used to load a IOSystem or Extension specified in a ini file. DataFrames (tables) are loaded from text or binary pickle files. For the latter, the extension .pkl or .pickle is assumed, in all other case the tables are assumed to be in .txt format. Parameters ---------- path : string path or ini file name for the data to load recursive : boolean, optional If True, load also the data in the subfolders and add them as extensions to the IOSystem (in that case path must point to the root). Only first order subfolders are considered (no subfolders in subfolders) and if a folder does not contain a ini file it's skipped. Use the subini parameter in case of multiple ini files in a subfolder. Attribute name of the extension in the IOSystem are based on the subfolder name. Default is False ini : string, optional If there are several ini files in the root folder, take this one for loading the data If None (default) take the ini found in the folder, error if several are found subini : dict, optional If there are multiple ini in the subfolder, use the ini given in the dict. Format: 'subfoldername':'ininame' If a key for a subfolder is not found or None (default), the ini found in the folder will be taken, error if several are found include_core : boolean, optional If False the load method does not include A, L and Z matrix. This significantly reduces the required memory if the purpose is only to analyse the results calculated beforehand. 
Returns ------- IOSystem or Extension class depending on systemtype in the ini file None in case of errors """ # check path and given parameter ini_file_name = None path = os.path.abspath(os.path.normpath(path)) if os.path.splitext(path)[1] == '.ini': (path, ini_file_name) = os.path.split(path) if ini: ini_file_name = ini if not os.path.exists(path): raise ReadError('Given path does not exist') return None if not ini_file_name: _inifound = False for file in os.listdir(path): if os.path.splitext(file)[1] == '.ini': if _inifound: raise ReadError( 'Found multiple ini files in folder - specify one') return None ini_file_name = file _inifound = True # read the ini io_ini = configparser.RawConfigParser() io_ini.optionxform = lambda option: option io_ini.read(os.path.join(path, ini_file_name)) systemtype = io_ini.get('systemtype', 'systemtype', fallback=None) name = io_ini.get('meta', 'name', fallback=os.path.splitext(ini_file_name)[0]) if systemtype == 'IOSystem': ret_system = IOSystem(name=name) elif systemtype == 'Extension': ret_system = Extension(name=name) else: raise ReadError('System not defined in ini') return None for key in io_ini['meta']: setattr(ret_system, key, io_ini.get('meta', key, fallback=None)) for key in io_ini['files']: if '_nr_index_col' in key: continue if '_nr_header' in key: continue if not include_core: not_to_load = ['A', 'L', 'Z'] if key in not_to_load: continue if only_coefficients: _io = IOSystem() if key not in _io.__coefficients__ + ['unit']: continue file_name = io_ini.get('files', key) nr_index_col = io_ini.get( 'files', key + '_nr_index_col', fallback=None) nr_header = io_ini.get('files', key + '_nr_header', fallback=None) if (nr_index_col is None) or (nr_header is None): raise ReadError( 'Index or column specification missing for {}'. 
format(str(file_name))) return None _index_col = list(range(int(nr_index_col))) _header = list(range(int(nr_header))) if _index_col == [0]: _index_col = 0 if _header == [0]: _header = 0 file = os.path.join(path, file_name) logging.info('Load data from {}'.format(file)) if (os.path.splitext(file)[1] == '.pkl' or os.path.splitext(file)[1] == '.pickle'): setattr(ret_system, key, pd.read_pickle(file)) else: setattr(ret_system, key, pd.read_table(file, index_col=_index_col, header=_header)) if recursive: # look for subfolder in the given path subfolder_list = os.walk(path).__next__()[1] # loop all subfolder and append extension based on # ini file in subfolder for subfolder in subfolder_list: subini_file_name = subini.get(subfolder) subpath = os.path.abspath(os.path.join(path, subfolder)) if not subini_file_name: _inifound = False for file in os.listdir(subpath): if os.path.splitext(file)[1] == '.ini': if _inifound: raise ReadError( 'Found multiple ini files in subfolder ' '{} - specify one'.format(subpath)) return None subini_file_name = file _inifound = True if not _inifound: continue # read the ini subio_ini = configparser.RawConfigParser() subio_ini.optionxform = lambda option: option subio_ini.read(os.path.join(subpath, subini_file_name)) systemtype = subio_ini.get('systemtype', 'systemtype', fallback=None) name = subio_ini.get('meta', 'name', fallback=os.path.splitext( subini_file_name)[0]) if systemtype == 'IOSystem': raise ReadError('IOSystem found in subfolder {} - ' 'only extensions expected'.format(subpath)) return None elif systemtype == 'Extension': sub_system = Extension(name=name) else: raise ReadError('System not defined in ini') return None for key in subio_ini['meta']: setattr(sub_system, key, subio_ini.get('meta', key, fallback=None)) for key in subio_ini['files']: if '_nr_index_col' in key: continue if '_nr_header' in key: continue if only_coefficients: _ext = Extension('temp') if key not in _ext.__coefficients__ + ['unit']: continue file_name = 
subio_ini.get('files', key) nr_index_col = subio_ini.get('files', key + '_nr_index_col', fallback=None) nr_header = subio_ini.get('files', key + '_nr_header', fallback=None) if (nr_index_col is None) or (nr_header is None): raise ReadError('Index or column specification missing ' 'for {}'.format(str(file_name))) return None _index_col = list(range(int(nr_index_col))) _header = list(range(int(nr_header))) if _index_col == [0]: _index_col = 0 if _header == [0]: _header = 0 file = os.path.join(subpath, file_name) logging.info('Load data from {}'.format(file)) if (os.path.splitext(file)[1] == '.pkl' or os.path.splitext(file)[1] == '.pickle'): setattr(sub_system, key, pd.read_pickle(file)) else: setattr(sub_system, key, pd.read_table(file, index_col=_index_col, header=_header)) # get valid python name from folder def clean(varStr): return re.sub('\W|^(?=\d)', '_', str(varStr)) setattr(ret_system, clean(subfolder), sub_system) return ret_system
def _update_fps(self, event): """Update the fps after every window""" self._frame_count += 1 diff = time() - self._basetime if (diff > self._fps_window): self._fps = self._frame_count / diff self._basetime = time() self._frame_count = 0 self._fps_callback(self.fps)
Update the fps after every window
Below is the the instruction that describes the task: ### Input: Update the fps after every window ### Response: def _update_fps(self, event): """Update the fps after every window""" self._frame_count += 1 diff = time() - self._basetime if (diff > self._fps_window): self._fps = self._frame_count / diff self._basetime = time() self._frame_count = 0 self._fps_callback(self.fps)
def data(self): """Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2) """ class Datas(object): pass datas = Datas() for name, array in self.columns.items(): setattr(datas, name, array) return datas
Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2)
Below is the the instruction that describes the task: ### Input: Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2) ### Response: def data(self): """Gives direct access to the data as numpy arrays. Convenient when working with IPython in combination with small DataFrames, since this gives tab-completion. Only real columns (i.e. no virtual) columns can be accessed, for getting the data from virtual columns, use DataFrame.evalulate(...). Columns can be accesed by there names, which are attributes. The attribues are of type numpy.ndarray. Example: >>> df = vaex.example() >>> r = np.sqrt(df.data.x**2 + df.data.y**2) """ class Datas(object): pass datas = Datas() for name, array in self.columns.items(): setattr(datas, name, array) return datas
def fire_event(self, event_name, service_name, default=None): """ Fire a data_ready, data_lost, start, or stop event on a given service. """ service = self.get_service(service_name) callbacks = service.get(event_name, default) if not callbacks: return if not isinstance(callbacks, Iterable): callbacks = [callbacks] for callback in callbacks: if isinstance(callback, ManagerCallback): callback(self, service_name, event_name) else: callback(service_name)
Fire a data_ready, data_lost, start, or stop event on a given service.
Below is the the instruction that describes the task: ### Input: Fire a data_ready, data_lost, start, or stop event on a given service. ### Response: def fire_event(self, event_name, service_name, default=None): """ Fire a data_ready, data_lost, start, or stop event on a given service. """ service = self.get_service(service_name) callbacks = service.get(event_name, default) if not callbacks: return if not isinstance(callbacks, Iterable): callbacks = [callbacks] for callback in callbacks: if isinstance(callback, ManagerCallback): callback(self, service_name, event_name) else: callback(service_name)
def kak_decomposition( mat: np.ndarray, rtol: float = 1e-5, atol: float = 1e-8) -> KakDecomposition: """Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions. Args: mat: The 4x4 unitary matrix to decompose. rtol: Per-matrix-entry relative tolerance on equality. atol: Per-matrix-entry absolute tolerance on equality. Returns: A `cirq.KakDecomposition` canonicalized such that the interaction coefficients x, y, z satisfy: 0 ≤ abs(z) ≤ y ≤ x ≤ π/4 z ≠ -π/4 Raises: ValueError: Bad matrix. ArithmeticError: Failed to perform the decomposition. References: 'An Introduction to Cartan's KAK Decomposition for QC Programmers' https://arxiv.org/abs/quant-ph/0507171 """ magic = np.array([[1, 0, 0, 1j], [0, 1j, 1, 0], [0, 1j, -1, 0], [1, 0, 0, -1j]]) * np.sqrt(0.5) gamma = np.array([[1, 1, 1, 1], [1, 1, -1, -1], [-1, 1, -1, 1], [1, -1, -1, 1]]) * 0.25 # Diagonalize in magic basis. left, d, right = diagonalize.bidiagonalize_unitary_with_special_orthogonals( combinators.dot(np.conj(magic.T), mat, magic), atol=atol, rtol=rtol, check_preconditions=False) # Recover pieces. a1, a0 = so4_to_magic_su2s(left.T, atol=atol, rtol=rtol, check_preconditions=False) b1, b0 = so4_to_magic_su2s(right.T, atol=atol, rtol=rtol, check_preconditions=False) w, x, y, z = gamma.dot(np.vstack(np.angle(d))).flatten() g = np.exp(1j * w) # Canonicalize. inner_cannon = kak_canonicalize_vector(x, y, z) b1 = np.dot(inner_cannon.single_qubit_operations_before[0], b1) b0 = np.dot(inner_cannon.single_qubit_operations_before[1], b0) a1 = np.dot(a1, inner_cannon.single_qubit_operations_after[0]) a0 = np.dot(a0, inner_cannon.single_qubit_operations_after[1]) return KakDecomposition( interaction_coefficients=inner_cannon.interaction_coefficients, global_phase=g * inner_cannon.global_phase, single_qubit_operations_before=(b1, b0), single_qubit_operations_after=(a1, a0))
Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions. Args: mat: The 4x4 unitary matrix to decompose. rtol: Per-matrix-entry relative tolerance on equality. atol: Per-matrix-entry absolute tolerance on equality. Returns: A `cirq.KakDecomposition` canonicalized such that the interaction coefficients x, y, z satisfy: 0 ≤ abs(z) ≤ y ≤ x ≤ π/4 z ≠ -π/4 Raises: ValueError: Bad matrix. ArithmeticError: Failed to perform the decomposition. References: 'An Introduction to Cartan's KAK Decomposition for QC Programmers' https://arxiv.org/abs/quant-ph/0507171
Below is the the instruction that describes the task: ### Input: Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions. Args: mat: The 4x4 unitary matrix to decompose. rtol: Per-matrix-entry relative tolerance on equality. atol: Per-matrix-entry absolute tolerance on equality. Returns: A `cirq.KakDecomposition` canonicalized such that the interaction coefficients x, y, z satisfy: 0 ≤ abs(z) ≤ y ≤ x ≤ π/4 z ≠ -π/4 Raises: ValueError: Bad matrix. ArithmeticError: Failed to perform the decomposition. References: 'An Introduction to Cartan's KAK Decomposition for QC Programmers' https://arxiv.org/abs/quant-ph/0507171 ### Response: def kak_decomposition( mat: np.ndarray, rtol: float = 1e-5, atol: float = 1e-8) -> KakDecomposition: """Decomposes a 2-qubit unitary into 1-qubit ops and XX/YY/ZZ interactions. Args: mat: The 4x4 unitary matrix to decompose. rtol: Per-matrix-entry relative tolerance on equality. atol: Per-matrix-entry absolute tolerance on equality. Returns: A `cirq.KakDecomposition` canonicalized such that the interaction coefficients x, y, z satisfy: 0 ≤ abs(z) ≤ y ≤ x ≤ π/4 z ≠ -π/4 Raises: ValueError: Bad matrix. ArithmeticError: Failed to perform the decomposition. References: 'An Introduction to Cartan's KAK Decomposition for QC Programmers' https://arxiv.org/abs/quant-ph/0507171 """ magic = np.array([[1, 0, 0, 1j], [0, 1j, 1, 0], [0, 1j, -1, 0], [1, 0, 0, -1j]]) * np.sqrt(0.5) gamma = np.array([[1, 1, 1, 1], [1, 1, -1, -1], [-1, 1, -1, 1], [1, -1, -1, 1]]) * 0.25 # Diagonalize in magic basis. left, d, right = diagonalize.bidiagonalize_unitary_with_special_orthogonals( combinators.dot(np.conj(magic.T), mat, magic), atol=atol, rtol=rtol, check_preconditions=False) # Recover pieces. 
a1, a0 = so4_to_magic_su2s(left.T, atol=atol, rtol=rtol, check_preconditions=False) b1, b0 = so4_to_magic_su2s(right.T, atol=atol, rtol=rtol, check_preconditions=False) w, x, y, z = gamma.dot(np.vstack(np.angle(d))).flatten() g = np.exp(1j * w) # Canonicalize. inner_cannon = kak_canonicalize_vector(x, y, z) b1 = np.dot(inner_cannon.single_qubit_operations_before[0], b1) b0 = np.dot(inner_cannon.single_qubit_operations_before[1], b0) a1 = np.dot(a1, inner_cannon.single_qubit_operations_after[0]) a0 = np.dot(a0, inner_cannon.single_qubit_operations_after[1]) return KakDecomposition( interaction_coefficients=inner_cannon.interaction_coefficients, global_phase=g * inner_cannon.global_phase, single_qubit_operations_before=(b1, b0), single_qubit_operations_after=(a1, a0))
def get_otp(hsm, args): """ Get OTP from YubiKey. """ if args.no_otp: return None if hsm.version.have_unlock(): if args.stdin: otp = sys.stdin.readline() while otp and otp[-1] == '\n': otp = otp[:-1] else: otp = raw_input('Enter admin YubiKey OTP (press enter to skip) : ') if len(otp) == 44: # YubiHSM admin OTP's always have a public_id length of 6 bytes return otp if otp: sys.stderr.write("ERROR: Invalid YubiKey OTP\n") return None
Get OTP from YubiKey.
Below is the the instruction that describes the task: ### Input: Get OTP from YubiKey. ### Response: def get_otp(hsm, args): """ Get OTP from YubiKey. """ if args.no_otp: return None if hsm.version.have_unlock(): if args.stdin: otp = sys.stdin.readline() while otp and otp[-1] == '\n': otp = otp[:-1] else: otp = raw_input('Enter admin YubiKey OTP (press enter to skip) : ') if len(otp) == 44: # YubiHSM admin OTP's always have a public_id length of 6 bytes return otp if otp: sys.stderr.write("ERROR: Invalid YubiKey OTP\n") return None
def supervise_until_complete(monitor_data, dependencies, args, recovery_file): """ Supervisor loop. Loop forever until all tasks are evaluated or completed """ project = args['project'] workspace = args['workspace'] namespace = args['namespace'] sample_sets = args['sample_sets'] recovery_data = {'args': args} if not validate_monitor_tasks(dependencies, args): logging.error("Errors found, aborting...") return while True: # There are 4 possible states for each node: # 1. Not Started -- In this state, check all the dependencies for the # node (possibly 0). If all of them have been evaluated, and the # satisfiedMode is met, start the task, change to "Running". if # satisfiedMode is not met, change to "Evaluated" # # 2. Running -- Submitted in FC. Check the submission endpoint, and # if it has completed, change to "Completed", set evaluated=True, # and whether the task succeeded # Otherwise, do nothing # # 3. Completed -- Job ran in FC and either succeeded or failed. Do nothing # 4. Evaluated -- All dependencies evaluated, but this task did not run # do nothing # Keep a tab of the number of jobs in each category running = 0 waiting = 0 completed = 0 # Get the submissions r = fapi.list_submissions(project, workspace) sub_list = r.json() #TODO: filter this list by submission time first? 
sub_lookup = {s["submissionId"]: s for s in sub_list} # Keys of dependencies is the list of tasks to run for n in dependencies: for sset in sample_sets: task_data = monitor_data[n][sset] if task_data['state'] == "Not Started": # See if all of the dependencies have been evaluated upstream_evaluated = True for dep in dependencies[n]: # Look up the status of the task upstream_task_data = monitor_data[dep['upstream_task']][sset] if not upstream_task_data.get('evaluated'): upstream_evaluated = False # if all of the dependencies have been evaluated, we can evaluate # this node if upstream_evaluated: # Now check the satisfied Mode of all the dependencies should_run = True for dep in dependencies[n]: upstream_task_data = monitor_data[dep['upstream_task']][sset] mode = dep['satisfiedMode'] # Task must have succeeded for OnComplete if mode == '"OnComplete"' and not upstream_task_data['succeeded']: should_run = False # 'Always' and 'Optional' run once the deps have been # evaluated if should_run: # Submit the workflow to FC fc_config = n logging.info("Starting workflow " + fc_config + " on " + sset) # How to handle errors at this step? 
for retry in range(3): r = fapi.create_submission( project, workspace, namespace, fc_config, sset, etype="sample_set", expression=None ) if r.status_code == 201: task_data['submissionId'] = r.json()['submissionId'] task_data['state'] = "Running" running += 1 break else: # There was an error, under certain circumstances retry logging.debug("Create_submission for " + fc_config + "failed on " + sset + " with the following response:" + r.content + "\nRetrying...") else: # None of the attempts above succeeded, log an error, mark as failed logging.error("Maximum retries exceeded") task_data['state'] = 'Completed' task_data['evaluated'] = True task_data['succeeded'] = False else: # This task will never be able to run, mark evaluated task_data['state'] = "Evaluated" task_data['evaluated'] = True completed += 1 else: waiting += 1 elif task_data['state'] == "Running": submission = sub_lookup[task_data['submissionId']] status = submission['status'] if status == "Done": # Look at the individual workflows to see if there were # failures logging.info("Workflow " + n + " completed for " + sset) success = 'Failed' not in submission['workflowStatuses'] task_data['evaluated'] = True task_data['succeeded'] = success task_data['state'] = "Completed" completed += 1 else: # Submission isn't done, don't do anything running += 1 else: # Either Completed or evaluated completed += 1 # Save the state of the monitor for recovery purposes # Have to do this for every workflow + sample_set so we don't lose track of any recovery_data['monitor_data'] = monitor_data recovery_data['dependencies'] = dependencies with open(recovery_file, 'w') as rf: json.dump(recovery_data, rf) logging.info("{0} Waiting, {1} Running, {2} Completed".format(waiting, running, completed)) # If all tasks have been evaluated, we are done if all(monitor_data[n][sset]['evaluated'] for n in monitor_data for sset in monitor_data[n]): logging.info("DONE.") break time.sleep(30)
Supervisor loop. Loop forever until all tasks are evaluated or completed
Below is the the instruction that describes the task: ### Input: Supervisor loop. Loop forever until all tasks are evaluated or completed ### Response: def supervise_until_complete(monitor_data, dependencies, args, recovery_file): """ Supervisor loop. Loop forever until all tasks are evaluated or completed """ project = args['project'] workspace = args['workspace'] namespace = args['namespace'] sample_sets = args['sample_sets'] recovery_data = {'args': args} if not validate_monitor_tasks(dependencies, args): logging.error("Errors found, aborting...") return while True: # There are 4 possible states for each node: # 1. Not Started -- In this state, check all the dependencies for the # node (possibly 0). If all of them have been evaluated, and the # satisfiedMode is met, start the task, change to "Running". if # satisfiedMode is not met, change to "Evaluated" # # 2. Running -- Submitted in FC. Check the submission endpoint, and # if it has completed, change to "Completed", set evaluated=True, # and whether the task succeeded # Otherwise, do nothing # # 3. Completed -- Job ran in FC and either succeeded or failed. Do nothing # 4. Evaluated -- All dependencies evaluated, but this task did not run # do nothing # Keep a tab of the number of jobs in each category running = 0 waiting = 0 completed = 0 # Get the submissions r = fapi.list_submissions(project, workspace) sub_list = r.json() #TODO: filter this list by submission time first? 
sub_lookup = {s["submissionId"]: s for s in sub_list} # Keys of dependencies is the list of tasks to run for n in dependencies: for sset in sample_sets: task_data = monitor_data[n][sset] if task_data['state'] == "Not Started": # See if all of the dependencies have been evaluated upstream_evaluated = True for dep in dependencies[n]: # Look up the status of the task upstream_task_data = monitor_data[dep['upstream_task']][sset] if not upstream_task_data.get('evaluated'): upstream_evaluated = False # if all of the dependencies have been evaluated, we can evaluate # this node if upstream_evaluated: # Now check the satisfied Mode of all the dependencies should_run = True for dep in dependencies[n]: upstream_task_data = monitor_data[dep['upstream_task']][sset] mode = dep['satisfiedMode'] # Task must have succeeded for OnComplete if mode == '"OnComplete"' and not upstream_task_data['succeeded']: should_run = False # 'Always' and 'Optional' run once the deps have been # evaluated if should_run: # Submit the workflow to FC fc_config = n logging.info("Starting workflow " + fc_config + " on " + sset) # How to handle errors at this step? 
for retry in range(3): r = fapi.create_submission( project, workspace, namespace, fc_config, sset, etype="sample_set", expression=None ) if r.status_code == 201: task_data['submissionId'] = r.json()['submissionId'] task_data['state'] = "Running" running += 1 break else: # There was an error, under certain circumstances retry logging.debug("Create_submission for " + fc_config + "failed on " + sset + " with the following response:" + r.content + "\nRetrying...") else: # None of the attempts above succeeded, log an error, mark as failed logging.error("Maximum retries exceeded") task_data['state'] = 'Completed' task_data['evaluated'] = True task_data['succeeded'] = False else: # This task will never be able to run, mark evaluated task_data['state'] = "Evaluated" task_data['evaluated'] = True completed += 1 else: waiting += 1 elif task_data['state'] == "Running": submission = sub_lookup[task_data['submissionId']] status = submission['status'] if status == "Done": # Look at the individual workflows to see if there were # failures logging.info("Workflow " + n + " completed for " + sset) success = 'Failed' not in submission['workflowStatuses'] task_data['evaluated'] = True task_data['succeeded'] = success task_data['state'] = "Completed" completed += 1 else: # Submission isn't done, don't do anything running += 1 else: # Either Completed or evaluated completed += 1 # Save the state of the monitor for recovery purposes # Have to do this for every workflow + sample_set so we don't lose track of any recovery_data['monitor_data'] = monitor_data recovery_data['dependencies'] = dependencies with open(recovery_file, 'w') as rf: json.dump(recovery_data, rf) logging.info("{0} Waiting, {1} Running, {2} Completed".format(waiting, running, completed)) # If all tasks have been evaluated, we are done if all(monitor_data[n][sset]['evaluated'] for n in monitor_data for sset in monitor_data[n]): logging.info("DONE.") break time.sleep(30)
def configure_logging(verbose, logger): """Configures the logging used.""" if not verbose: log_level = logging.WARNING elif verbose == 1: log_level = logging.INFO else: log_level = logging.DEBUG logger.setLevel(log_level) ch = colorlog.StreamHandler() ch.setLevel(log_level) formatter = colorlog.ColoredFormatter( '%(log_color)s%(asctime)s %(name)s %(levelname)s: %(message)s') ch.setFormatter(formatter) logger.addHandler(ch)
Configures the logging used.
Below is the the instruction that describes the task: ### Input: Configures the logging used. ### Response: def configure_logging(verbose, logger): """Configures the logging used.""" if not verbose: log_level = logging.WARNING elif verbose == 1: log_level = logging.INFO else: log_level = logging.DEBUG logger.setLevel(log_level) ch = colorlog.StreamHandler() ch.setLevel(log_level) formatter = colorlog.ColoredFormatter( '%(log_color)s%(asctime)s %(name)s %(levelname)s: %(message)s') ch.setFormatter(formatter) logger.addHandler(ch)
def write_node(node, writer=None, encoding='utf-8', indent=0, newline='', omit_declaration=False, node_depth=0, quote_char='"'): """ Serialize an *xml4h* DOM node and its descendants to text, writing the output to a given *writer* or to stdout. :param node: the DOM node whose content and descendants will be serialized. :type node: an :class:`xml4h.nodes.Node` or subclass :param writer: an object such as a file or stream to which XML text is sent. If *None* text is sent to :attr:`sys.stdout`. :type writer: a file, stream, etc or None :param string encoding: the character encoding for serialized text. :param indent: indentation prefix to apply to descendent nodes for pretty-printing. The value can take many forms: - *int*: the number of spaces to indent. 0 means no indent. - *string*: a literal prefix for indented nodes, such as ``\\t``. - *bool*: no indent if *False*, four spaces indent if *True*. - *None*: no indent. :type indent: string, int, bool, or None :param newline: the string value used to separate lines of output. The value can take a number of forms: - *string*: the literal newline value, such as ``\\n`` or ``\\r``. An empty string means no newline. - *bool*: no newline if *False*, ``\\n`` newline if *True*. - *None*: no newline. :type newline: string, bool, or None :param boolean omit_declaration: if *True* the XML declaration header is omitted, otherwise it is included. Note that the declaration is only output when serializing an :class:`xml4h.nodes.Document` node. :param int node_depth: the indentation level to start at, such as 2 to indent output as if the given *node* has two ancestors. This parameter will only be useful if you need to output XML text fragments that can be assembled into a document. This parameter has no effect unless indentation is applied. :param string quote_char: the character that delimits quoted content. You should never need to mess with this. 
""" def _sanitize_write_value(value): """Return XML-encoded value.""" if not value: return value return (value .replace("&", "&amp;") .replace("<", "&lt;") .replace("\"", "&quot;") .replace(">", "&gt;") ) def _write_node_impl(node, node_depth): """ Internal write implementation that does the real work while keeping track of node depth. """ # Output document declaration if we're outputting the whole doc if node.is_document: if not omit_declaration: writer.write( '<?xml version=%s1.0%s' % (quote_char, quote_char)) if encoding: writer.write(' encoding=%s%s%s' % (quote_char, encoding, quote_char)) writer.write('?>%s' % newline) for child in node.children: _write_node_impl(child, node_depth) # node_depth not incremented writer.write(newline) elif node.is_document_type: writer.write("<!DOCTYPE %s SYSTEM %s%s%s" % (node.name, quote_char, node.public_id)) if node.system_id is not None: writer.write( " %s%s%s" % (quote_char, node.system_id, quote_char)) if node.children: writer.write("[") for child in node.children: _write_node_impl(child, node_depth + 1) writer.write("]") writer.write(">") elif node.is_text: writer.write(_sanitize_write_value(node.value)) elif node.is_cdata: if ']]>' in node.value: raise ValueError("']]>' is not allowed in CDATA node value") writer.write("<![CDATA[%s]]>" % node.value) #elif node.is_entity_reference: # TODO elif node.is_entity: writer.write(newline + indent * node_depth) writer.write("<!ENTITY ") if node.is_paremeter_entity: writer.write('%% ') writer.write("%s %s%s%s>" % (node.name, quote_char, node.value, quote_char)) elif node.is_processing_instruction: writer.write(newline + indent * node_depth) writer.write("<?%s %s?>" % (node.target, node.data)) elif node.is_comment: if '--' in node.value: raise ValueError("'--' is not allowed in COMMENT node value") writer.write("<!--%s-->" % node.value) elif node.is_notation: writer.write(newline + indent * node_depth) writer.write("<!NOTATION %s" % node.name) if node.is_system_identifier: 
writer.write(" system %s%s%s>" % (quote_char, node.external_id, quote_char)) elif node.is_system_identifier: writer.write(" system %s%s%s %s%s%s>" % (quote_char, node.external_id, quote_char, quote_char, node.uri, quote_char)) elif node.is_attribute: writer.write(" %s=%s" % (node.name, quote_char)) writer.write(_sanitize_write_value(node.value)) writer.write(quote_char) elif node.is_element: # Only need a preceding newline if we're in a sub-element if node_depth > 0: writer.write(newline) writer.write(indent * node_depth) writer.write("<" + node.name) for attr in node.attribute_nodes: _write_node_impl(attr, node_depth) if node.children: found_indented_child = False writer.write(">") for child in node.children: _write_node_impl(child, node_depth + 1) if not (child.is_text or child.is_comment or child.is_cdata): found_indented_child = True if found_indented_child: writer.write(newline + indent * node_depth) writer.write('</%s>' % node.name) else: writer.write('/>') else: raise exceptions.Xml4hImplementationBug( 'Cannot write node with class: %s' % node.__class__) # Sanitize whitespace parameters if indent is True: indent = ' ' * 4 elif indent is False: indent = '' elif isinstance(indent, int): indent = ' ' * indent # If indent but no newline set, always apply a newline (it makes sense) if indent and not newline: newline = True if newline is None or newline is False: newline = '' elif newline is True: newline = '\n' # We always need a writer, use stdout by default if writer is None: writer = sys.stdout # Apply a text encoding if we have one if encoding is None: writer = writer else: writer = codecs.getwriter(encoding)(writer) # Do the business... _write_node_impl(node, node_depth)
Serialize an *xml4h* DOM node and its descendants to text, writing the output to a given *writer* or to stdout. :param node: the DOM node whose content and descendants will be serialized. :type node: an :class:`xml4h.nodes.Node` or subclass :param writer: an object such as a file or stream to which XML text is sent. If *None* text is sent to :attr:`sys.stdout`. :type writer: a file, stream, etc or None :param string encoding: the character encoding for serialized text. :param indent: indentation prefix to apply to descendent nodes for pretty-printing. The value can take many forms: - *int*: the number of spaces to indent. 0 means no indent. - *string*: a literal prefix for indented nodes, such as ``\\t``. - *bool*: no indent if *False*, four spaces indent if *True*. - *None*: no indent. :type indent: string, int, bool, or None :param newline: the string value used to separate lines of output. The value can take a number of forms: - *string*: the literal newline value, such as ``\\n`` or ``\\r``. An empty string means no newline. - *bool*: no newline if *False*, ``\\n`` newline if *True*. - *None*: no newline. :type newline: string, bool, or None :param boolean omit_declaration: if *True* the XML declaration header is omitted, otherwise it is included. Note that the declaration is only output when serializing an :class:`xml4h.nodes.Document` node. :param int node_depth: the indentation level to start at, such as 2 to indent output as if the given *node* has two ancestors. This parameter will only be useful if you need to output XML text fragments that can be assembled into a document. This parameter has no effect unless indentation is applied. :param string quote_char: the character that delimits quoted content. You should never need to mess with this.
Below is the the instruction that describes the task: ### Input: Serialize an *xml4h* DOM node and its descendants to text, writing the output to a given *writer* or to stdout. :param node: the DOM node whose content and descendants will be serialized. :type node: an :class:`xml4h.nodes.Node` or subclass :param writer: an object such as a file or stream to which XML text is sent. If *None* text is sent to :attr:`sys.stdout`. :type writer: a file, stream, etc or None :param string encoding: the character encoding for serialized text. :param indent: indentation prefix to apply to descendent nodes for pretty-printing. The value can take many forms: - *int*: the number of spaces to indent. 0 means no indent. - *string*: a literal prefix for indented nodes, such as ``\\t``. - *bool*: no indent if *False*, four spaces indent if *True*. - *None*: no indent. :type indent: string, int, bool, or None :param newline: the string value used to separate lines of output. The value can take a number of forms: - *string*: the literal newline value, such as ``\\n`` or ``\\r``. An empty string means no newline. - *bool*: no newline if *False*, ``\\n`` newline if *True*. - *None*: no newline. :type newline: string, bool, or None :param boolean omit_declaration: if *True* the XML declaration header is omitted, otherwise it is included. Note that the declaration is only output when serializing an :class:`xml4h.nodes.Document` node. :param int node_depth: the indentation level to start at, such as 2 to indent output as if the given *node* has two ancestors. This parameter will only be useful if you need to output XML text fragments that can be assembled into a document. This parameter has no effect unless indentation is applied. :param string quote_char: the character that delimits quoted content. You should never need to mess with this. 
### Response: def write_node(node, writer=None, encoding='utf-8', indent=0, newline='', omit_declaration=False, node_depth=0, quote_char='"'): """ Serialize an *xml4h* DOM node and its descendants to text, writing the output to a given *writer* or to stdout. :param node: the DOM node whose content and descendants will be serialized. :type node: an :class:`xml4h.nodes.Node` or subclass :param writer: an object such as a file or stream to which XML text is sent. If *None* text is sent to :attr:`sys.stdout`. :type writer: a file, stream, etc or None :param string encoding: the character encoding for serialized text. :param indent: indentation prefix to apply to descendent nodes for pretty-printing. The value can take many forms: - *int*: the number of spaces to indent. 0 means no indent. - *string*: a literal prefix for indented nodes, such as ``\\t``. - *bool*: no indent if *False*, four spaces indent if *True*. - *None*: no indent. :type indent: string, int, bool, or None :param newline: the string value used to separate lines of output. The value can take a number of forms: - *string*: the literal newline value, such as ``\\n`` or ``\\r``. An empty string means no newline. - *bool*: no newline if *False*, ``\\n`` newline if *True*. - *None*: no newline. :type newline: string, bool, or None :param boolean omit_declaration: if *True* the XML declaration header is omitted, otherwise it is included. Note that the declaration is only output when serializing an :class:`xml4h.nodes.Document` node. :param int node_depth: the indentation level to start at, such as 2 to indent output as if the given *node* has two ancestors. This parameter will only be useful if you need to output XML text fragments that can be assembled into a document. This parameter has no effect unless indentation is applied. :param string quote_char: the character that delimits quoted content. You should never need to mess with this. 
""" def _sanitize_write_value(value): """Return XML-encoded value.""" if not value: return value return (value .replace("&", "&amp;") .replace("<", "&lt;") .replace("\"", "&quot;") .replace(">", "&gt;") ) def _write_node_impl(node, node_depth): """ Internal write implementation that does the real work while keeping track of node depth. """ # Output document declaration if we're outputting the whole doc if node.is_document: if not omit_declaration: writer.write( '<?xml version=%s1.0%s' % (quote_char, quote_char)) if encoding: writer.write(' encoding=%s%s%s' % (quote_char, encoding, quote_char)) writer.write('?>%s' % newline) for child in node.children: _write_node_impl(child, node_depth) # node_depth not incremented writer.write(newline) elif node.is_document_type: writer.write("<!DOCTYPE %s SYSTEM %s%s%s" % (node.name, quote_char, node.public_id)) if node.system_id is not None: writer.write( " %s%s%s" % (quote_char, node.system_id, quote_char)) if node.children: writer.write("[") for child in node.children: _write_node_impl(child, node_depth + 1) writer.write("]") writer.write(">") elif node.is_text: writer.write(_sanitize_write_value(node.value)) elif node.is_cdata: if ']]>' in node.value: raise ValueError("']]>' is not allowed in CDATA node value") writer.write("<![CDATA[%s]]>" % node.value) #elif node.is_entity_reference: # TODO elif node.is_entity: writer.write(newline + indent * node_depth) writer.write("<!ENTITY ") if node.is_paremeter_entity: writer.write('%% ') writer.write("%s %s%s%s>" % (node.name, quote_char, node.value, quote_char)) elif node.is_processing_instruction: writer.write(newline + indent * node_depth) writer.write("<?%s %s?>" % (node.target, node.data)) elif node.is_comment: if '--' in node.value: raise ValueError("'--' is not allowed in COMMENT node value") writer.write("<!--%s-->" % node.value) elif node.is_notation: writer.write(newline + indent * node_depth) writer.write("<!NOTATION %s" % node.name) if node.is_system_identifier: 
writer.write(" system %s%s%s>" % (quote_char, node.external_id, quote_char)) elif node.is_system_identifier: writer.write(" system %s%s%s %s%s%s>" % (quote_char, node.external_id, quote_char, quote_char, node.uri, quote_char)) elif node.is_attribute: writer.write(" %s=%s" % (node.name, quote_char)) writer.write(_sanitize_write_value(node.value)) writer.write(quote_char) elif node.is_element: # Only need a preceding newline if we're in a sub-element if node_depth > 0: writer.write(newline) writer.write(indent * node_depth) writer.write("<" + node.name) for attr in node.attribute_nodes: _write_node_impl(attr, node_depth) if node.children: found_indented_child = False writer.write(">") for child in node.children: _write_node_impl(child, node_depth + 1) if not (child.is_text or child.is_comment or child.is_cdata): found_indented_child = True if found_indented_child: writer.write(newline + indent * node_depth) writer.write('</%s>' % node.name) else: writer.write('/>') else: raise exceptions.Xml4hImplementationBug( 'Cannot write node with class: %s' % node.__class__) # Sanitize whitespace parameters if indent is True: indent = ' ' * 4 elif indent is False: indent = '' elif isinstance(indent, int): indent = ' ' * indent # If indent but no newline set, always apply a newline (it makes sense) if indent and not newline: newline = True if newline is None or newline is False: newline = '' elif newline is True: newline = '\n' # We always need a writer, use stdout by default if writer is None: writer = sys.stdout # Apply a text encoding if we have one if encoding is None: writer = writer else: writer = codecs.getwriter(encoding)(writer) # Do the business... _write_node_impl(node, node_depth)
def GetReportDownloader(self, version=sorted(_SERVICE_MAP.keys())[-1], server=None): """Creates a downloader for AdWords reports. This is a convenience method. It is functionally identical to calling ReportDownloader(adwords_client, version, server). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: A ReportDownloader tied to this AdWordsClient, ready to download reports. """ if not server: server = _DEFAULT_ENDPOINT return ReportDownloader(self, version, server)
Creates a downloader for AdWords reports. This is a convenience method. It is functionally identical to calling ReportDownloader(adwords_client, version, server). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: A ReportDownloader tied to this AdWordsClient, ready to download reports.
Below is the the instruction that describes the task: ### Input: Creates a downloader for AdWords reports. This is a convenience method. It is functionally identical to calling ReportDownloader(adwords_client, version, server). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: A ReportDownloader tied to this AdWordsClient, ready to download reports. ### Response: def GetReportDownloader(self, version=sorted(_SERVICE_MAP.keys())[-1], server=None): """Creates a downloader for AdWords reports. This is a convenience method. It is functionally identical to calling ReportDownloader(adwords_client, version, server). Args: [optional] version: A string identifying the AdWords version to connect to. This defaults to what is currently the latest version. This will be updated in future releases to point to what is then the latest version. server: A string identifying the webserver hosting the AdWords API. Returns: A ReportDownloader tied to this AdWordsClient, ready to download reports. """ if not server: server = _DEFAULT_ENDPOINT return ReportDownloader(self, version, server)
def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ lengths = [s.sp_index.npoints for _, s in frame.items()] nobs = sum(lengths) # this is pretty fast minor_codes = np.repeat(np.arange(len(frame.columns)), lengths) inds_to_concat = [] vals_to_concat = [] # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). for _, series in frame.items(): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) vals_to_concat.append(series.sp_values) major_codes = np.concatenate(inds_to_concat) stacked_values = np.concatenate(vals_to_concat) index = MultiIndex(levels=[frame.index, frame.columns], codes=[major_codes, minor_codes], verify_integrity=False) lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo']) return lp.sort_index(level=0)
Only makes sense when fill_value is NaN
Below is the the instruction that describes the task: ### Input: Only makes sense when fill_value is NaN ### Response: def stack_sparse_frame(frame): """ Only makes sense when fill_value is NaN """ lengths = [s.sp_index.npoints for _, s in frame.items()] nobs = sum(lengths) # this is pretty fast minor_codes = np.repeat(np.arange(len(frame.columns)), lengths) inds_to_concat = [] vals_to_concat = [] # TODO: Figure out whether this can be reached. # I think this currently can't be reached because you can't build a # SparseDataFrame with a non-np.NaN fill value (fails earlier). for _, series in frame.items(): if not np.isnan(series.fill_value): raise TypeError('This routine assumes NaN fill value') int_index = series.sp_index.to_int_index() inds_to_concat.append(int_index.indices) vals_to_concat.append(series.sp_values) major_codes = np.concatenate(inds_to_concat) stacked_values = np.concatenate(vals_to_concat) index = MultiIndex(levels=[frame.index, frame.columns], codes=[major_codes, minor_codes], verify_integrity=False) lp = DataFrame(stacked_values.reshape((nobs, 1)), index=index, columns=['foo']) return lp.sort_index(level=0)
def fbeta(log_preds, targs, beta, thresh=0.5, epsilon=1e-8): """Calculates the F-beta score (the weighted harmonic mean of precision and recall). This is the micro averaged version where the true positives, false negatives and false positives are calculated globally (as opposed to on a per label basis). beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and beta > 1 favors recall. """ assert beta > 0, 'beta needs to be greater than 0' beta2 = beta ** 2 rec = recall(log_preds, targs, thresh) prec = precision(log_preds, targs, thresh) return (1 + beta2) * prec * rec / (beta2 * prec + rec + epsilon)
Calculates the F-beta score (the weighted harmonic mean of precision and recall). This is the micro averaged version where the true positives, false negatives and false positives are calculated globally (as opposed to on a per label basis). beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and beta > 1 favors recall.
Below is the the instruction that describes the task: ### Input: Calculates the F-beta score (the weighted harmonic mean of precision and recall). This is the micro averaged version where the true positives, false negatives and false positives are calculated globally (as opposed to on a per label basis). beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and beta > 1 favors recall. ### Response: def fbeta(log_preds, targs, beta, thresh=0.5, epsilon=1e-8): """Calculates the F-beta score (the weighted harmonic mean of precision and recall). This is the micro averaged version where the true positives, false negatives and false positives are calculated globally (as opposed to on a per label basis). beta == 1 places equal weight on precision and recall, b < 1 emphasizes precision and beta > 1 favors recall. """ assert beta > 0, 'beta needs to be greater than 0' beta2 = beta ** 2 rec = recall(log_preds, targs, thresh) prec = precision(log_preds, targs, thresh) return (1 + beta2) * prec * rec / (beta2 * prec + rec + epsilon)
def debug_text_simple(self, text: str): """ Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'. """ self._debug_texts.append(self.to_debug_message(text))
Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'.
Below is the the instruction that describes the task: ### Input: Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'. ### Response: def debug_text_simple(self, text: str): """ Draws a text in the top left corner of the screen (up to a max of 6 messages it seems). Don't forget to add 'await self._client.send_debug'. """ self._debug_texts.append(self.to_debug_message(text))
def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate 
thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret
Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer'
Below is the the instruction that describes the task: ### Input: Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ### Response: def get_cert_file(name, cert_format=_DEFAULT_FORMAT, password=''): ''' Get the details of the certificate file. :param str name: The filesystem path of the certificate file. :param str cert_format: The certificate format. Specify 'cer' for X.509, or 'pfx' for PKCS #12. :param str password: The password of the certificate. Only applicable to pfx format. Note that if used interactively, the password will be seen by all minions. To protect the password, use a state and get the password from pillar. :return: A dictionary of the certificate thumbprints and properties. :rtype: dict CLI Example: .. 
code-block:: bash salt '*' win_pki.get_cert_file name='C:\\certs\\example.cer' ''' ret = dict() cmd = list() blacklist_keys = ['DnsNameList'] cert_format = cert_format.lower() _validate_cert_format(name=cert_format) if not name or not os.path.isfile(name): _LOG.error('Path is not present: %s', name) return ret if cert_format == 'pfx': if password: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'".format(name)) cmd.append(",'{0}'".format(password)) cmd.append(",'DefaultKeySet') ; $CertObject") cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append(r"Get-PfxCertificate -FilePath '{0}'".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') else: cmd.append('$CertObject = New-Object') cmd.append(' System.Security.Cryptography.X509Certificates.X509Certificate2;') cmd.append(r" $CertObject.Import('{0}'); $CertObject".format(name)) cmd.append(' | Select-Object DnsNameList, SerialNumber, Subject, ' 'Thumbprint, Version') items = _cmd_run(cmd=six.text_type().join(cmd), as_json=True) for item in items: for key in item: if key not in blacklist_keys: ret[key.lower()] = item[key] ret['dnsnames'] = [name['Unicode'] for name in item['DnsNameList']] if ret: _LOG.debug('Certificate thumbprint obtained successfully: %s', name) else: _LOG.error('Unable to obtain certificate thumbprint: %s', name) return ret
def on_the_air(self, **kwargs): """ Get the list of TV shows that are currently on the air. This query looks for any TV show that has an episode with an air date in the next 7 days. Args: page: (optional) Minimum 1, maximum 1000. language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('on_the_air') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
Get the list of TV shows that are currently on the air. This query looks for any TV show that has an episode with an air date in the next 7 days. Args: page: (optional) Minimum 1, maximum 1000. language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API.
Below is the the instruction that describes the task: ### Input: Get the list of TV shows that are currently on the air. This query looks for any TV show that has an episode with an air date in the next 7 days. Args: page: (optional) Minimum 1, maximum 1000. language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API. ### Response: def on_the_air(self, **kwargs): """ Get the list of TV shows that are currently on the air. This query looks for any TV show that has an episode with an air date in the next 7 days. Args: page: (optional) Minimum 1, maximum 1000. language: (optional) ISO 639 code. Returns: A dict respresentation of the JSON returned from the API. """ path = self._get_path('on_the_air') response = self._GET(path, kwargs) self._set_attrs_to_values(response) return response
def execute(self): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = NewOptionParser(prog=self.prog_name, usage=self.usage_info, # version=self.get_version(), formatter = NewFormatter(), add_help_option = False, option_list=self.option_list) options, args = parser.parse_args(self.argv) self.options = options if len(args) == 0: self.message("You should give at least one package name.", 'error') return subcommand = args[0] self.handle(args[1:], self.options, self.global_options, subcommand)
Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it.
Below is the the instruction that describes the task: ### Input: Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. ### Response: def execute(self): """ Given the command-line arguments, this figures out which subcommand is being run, creates a parser appropriate to that command, and runs it. """ # Preprocess options to extract --settings and --pythonpath. # These options could affect the commands that are available, so they # must be processed early. parser = NewOptionParser(prog=self.prog_name, usage=self.usage_info, # version=self.get_version(), formatter = NewFormatter(), add_help_option = False, option_list=self.option_list) options, args = parser.parse_args(self.argv) self.options = options if len(args) == 0: self.message("You should give at least one package name.", 'error') return subcommand = args[0] self.handle(args[1:], self.options, self.global_options, subcommand)
def _get_location(package, location): """Get the longer key with a short location name.""" for region in package['regions']: if region['location']['location']['name'] == location: return region raise SoftLayer.SoftLayerError("Could not find valid location for: '%s'" % location)
Get the longer key with a short location name.
Below is the the instruction that describes the task: ### Input: Get the longer key with a short location name. ### Response: def _get_location(package, location): """Get the longer key with a short location name.""" for region in package['regions']: if region['location']['location']['name'] == location: return region raise SoftLayer.SoftLayerError("Could not find valid location for: '%s'" % location)
def _value_and_batch_jacobian(f, x): """Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian. """ if tf.executing_eagerly(): with tf.GradientTape() as tape: tape.watch(x) value = f(x) batch_jacobian = tape.batch_jacobian(value, x) else: value = f(x) batch_jacobian = gradients.batch_jacobian(value, x) return value, batch_jacobian
Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian.
Below is the the instruction that describes the task: ### Input: Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian. ### Response: def _value_and_batch_jacobian(f, x): """Enables uniform interface to value and batch jacobian calculation. Works in both eager and graph modes. Arguments: f: The scalar function to evaluate. x: The value at which to compute the value and the batch jacobian. Returns: A tuple (f(x), J(x)), where J(x) is the batch jacobian. """ if tf.executing_eagerly(): with tf.GradientTape() as tape: tape.watch(x) value = f(x) batch_jacobian = tape.batch_jacobian(value, x) else: value = f(x) batch_jacobian = gradients.batch_jacobian(value, x) return value, batch_jacobian
def filter_table(table, filter_series, ignore=None): """ Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that ends with '_max' will be used to create a "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame """ with log_start_finish('filter table', logger): ignore = ignore if ignore else set() filters = [_filterize(name, val) for name, val in filter_series.iteritems() if not (name in ignore or (isinstance(val, numbers.Number) and np.isnan(val)))] return apply_filter_query(table, filters)
Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that ends with '_max' will be used to create a "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame
Below is the the instruction that describes the task: ### Input: Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that ends with '_max' will be used to create a "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame ### Response: def filter_table(table, filter_series, ignore=None): """ Filter a table based on a set of restrictions given in Series of column name / filter parameter pairs. The column names can have suffixes `_min` and `_max` to indicate "less than" and "greater than" constraints. Parameters ---------- table : pandas.DataFrame Table to filter. filter_series : pandas.Series Series of column name / value pairs of filter constraints. Columns that ends with '_max' will be used to create a "less than" filters, columns that end with '_min' will be used to create "greater than or equal to" filters. A column with no suffix will be used to make an 'equal to' filter. ignore : sequence of str, optional List of column names that should not be used for filtering. Returns ------- filtered : pandas.DataFrame """ with log_start_finish('filter table', logger): ignore = ignore if ignore else set() filters = [_filterize(name, val) for name, val in filter_series.iteritems() if not (name in ignore or (isinstance(val, numbers.Number) and np.isnan(val)))] return apply_filter_query(table, filters)
def get_collection(self): """ Return targeted mongo collection to query on """ db_mongo = self._mongo_client[self._index] return db_mongo[self._collection]
Return targeted mongo collection to query on
Below is the the instruction that describes the task: ### Input: Return targeted mongo collection to query on ### Response: def get_collection(self): """ Return targeted mongo collection to query on """ db_mongo = self._mongo_client[self._index] return db_mongo[self._collection]
def from_element(root, timezone): """Return a Schedule object based on an lxml Element for the <schedule> tag. timezone is a tzinfo object, ideally from pytz.""" assert root.tag == 'schedule' if root.xpath('intervals'): return _ScheduleIntervals(root, timezone) elif root.xpath('recurring_schedules'): return _ScheduleRecurring(root, timezone) raise NotImplementedError
Return a Schedule object based on an lxml Element for the <schedule> tag. timezone is a tzinfo object, ideally from pytz.
Below is the the instruction that describes the task: ### Input: Return a Schedule object based on an lxml Element for the <schedule> tag. timezone is a tzinfo object, ideally from pytz. ### Response: def from_element(root, timezone): """Return a Schedule object based on an lxml Element for the <schedule> tag. timezone is a tzinfo object, ideally from pytz.""" assert root.tag == 'schedule' if root.xpath('intervals'): return _ScheduleIntervals(root, timezone) elif root.xpath('recurring_schedules'): return _ScheduleRecurring(root, timezone) raise NotImplementedError
def set_lexer_from_filename(self, filename): """ Change the lexer based on the filename (actually only the extension is needed) :param filename: Filename or extension """ self._lexer = None if filename.endswith("~"): filename = filename[0:len(filename) - 1] try: self._lexer = get_lexer_for_filename(filename) except (ClassNotFound, ImportError): print('class not found for url', filename) try: m = mimetypes.guess_type(filename) print(m) self._lexer = get_lexer_for_mimetype(m[0]) except (ClassNotFound, IndexError, ImportError): self._lexer = get_lexer_for_mimetype('text/plain') if self._lexer is None: _logger().warning('failed to get lexer from filename: %s, using ' 'plain text instead...', filename) self._lexer = TextLexer()
Change the lexer based on the filename (actually only the extension is needed) :param filename: Filename or extension
Below is the the instruction that describes the task: ### Input: Change the lexer based on the filename (actually only the extension is needed) :param filename: Filename or extension ### Response: def set_lexer_from_filename(self, filename): """ Change the lexer based on the filename (actually only the extension is needed) :param filename: Filename or extension """ self._lexer = None if filename.endswith("~"): filename = filename[0:len(filename) - 1] try: self._lexer = get_lexer_for_filename(filename) except (ClassNotFound, ImportError): print('class not found for url', filename) try: m = mimetypes.guess_type(filename) print(m) self._lexer = get_lexer_for_mimetype(m[0]) except (ClassNotFound, IndexError, ImportError): self._lexer = get_lexer_for_mimetype('text/plain') if self._lexer is None: _logger().warning('failed to get lexer from filename: %s, using ' 'plain text instead...', filename) self._lexer = TextLexer()
def do_cld_check(self, cld): """ Do the "clause :math:`D`" check. This method receives a list of literals, which serves a "clause :math:`D`" [2]_, and checks whether the formula conjoined with :math:`D` is satisfiable. .. [2] Joao Marques-Silva, Federico Heras, Mikolas Janota, Alessandro Previti, Anton Belov. *On Computing Minimal Correction Subsets*. IJCAI 2013. pp. 615-622 If clause :math:`D` cannot be satisfied together with the formula, then negations of all of its literals are backbones of the formula and the LBX algorithm can stop. Otherwise, the literals satisfied by the new model refine the MCS further. Every time the method is called, a new fresh selector variable :math:`s` is introduced, which augments the current clause :math:`D`. The SAT oracle then checks if clause :math:`(D \\vee \\neg{s})` can be satisfied together with the internal formula. The :math:`D` clause is then disabled by adding a hard clause :math:`(\\neg{s})`. :param cld: clause :math:`D` to check :type cld: list(int) """ # adding a selector literal to clause D # selector literals for clauses D currently # cannot be reused, but this may change later self.topv += 1 sel = self.topv cld.append(-sel) # adding clause D self.oracle.add_clause(cld) if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [sel]): # filtering satisfied self._filter_satisfied(update_setd=True) else: # clause D is unsatisfiable => all literals are backbones self.bb_assumps.extend([-l for l in cld[:-1]]) self.setd = [] # deactivating clause D self.oracle.add_clause([-sel])
Do the "clause :math:`D`" check. This method receives a list of literals, which serves a "clause :math:`D`" [2]_, and checks whether the formula conjoined with :math:`D` is satisfiable. .. [2] Joao Marques-Silva, Federico Heras, Mikolas Janota, Alessandro Previti, Anton Belov. *On Computing Minimal Correction Subsets*. IJCAI 2013. pp. 615-622 If clause :math:`D` cannot be satisfied together with the formula, then negations of all of its literals are backbones of the formula and the LBX algorithm can stop. Otherwise, the literals satisfied by the new model refine the MCS further. Every time the method is called, a new fresh selector variable :math:`s` is introduced, which augments the current clause :math:`D`. The SAT oracle then checks if clause :math:`(D \\vee \\neg{s})` can be satisfied together with the internal formula. The :math:`D` clause is then disabled by adding a hard clause :math:`(\\neg{s})`. :param cld: clause :math:`D` to check :type cld: list(int)
Below is the the instruction that describes the task: ### Input: Do the "clause :math:`D`" check. This method receives a list of literals, which serves a "clause :math:`D`" [2]_, and checks whether the formula conjoined with :math:`D` is satisfiable. .. [2] Joao Marques-Silva, Federico Heras, Mikolas Janota, Alessandro Previti, Anton Belov. *On Computing Minimal Correction Subsets*. IJCAI 2013. pp. 615-622 If clause :math:`D` cannot be satisfied together with the formula, then negations of all of its literals are backbones of the formula and the LBX algorithm can stop. Otherwise, the literals satisfied by the new model refine the MCS further. Every time the method is called, a new fresh selector variable :math:`s` is introduced, which augments the current clause :math:`D`. The SAT oracle then checks if clause :math:`(D \\vee \\neg{s})` can be satisfied together with the internal formula. The :math:`D` clause is then disabled by adding a hard clause :math:`(\\neg{s})`. :param cld: clause :math:`D` to check :type cld: list(int) ### Response: def do_cld_check(self, cld): """ Do the "clause :math:`D`" check. This method receives a list of literals, which serves a "clause :math:`D`" [2]_, and checks whether the formula conjoined with :math:`D` is satisfiable. .. [2] Joao Marques-Silva, Federico Heras, Mikolas Janota, Alessandro Previti, Anton Belov. *On Computing Minimal Correction Subsets*. IJCAI 2013. pp. 615-622 If clause :math:`D` cannot be satisfied together with the formula, then negations of all of its literals are backbones of the formula and the LBX algorithm can stop. Otherwise, the literals satisfied by the new model refine the MCS further. Every time the method is called, a new fresh selector variable :math:`s` is introduced, which augments the current clause :math:`D`. The SAT oracle then checks if clause :math:`(D \\vee \\neg{s})` can be satisfied together with the internal formula. 
The :math:`D` clause is then disabled by adding a hard clause :math:`(\\neg{s})`. :param cld: clause :math:`D` to check :type cld: list(int) """ # adding a selector literal to clause D # selector literals for clauses D currently # cannot be reused, but this may change later self.topv += 1 sel = self.topv cld.append(-sel) # adding clause D self.oracle.add_clause(cld) if self.oracle.solve(assumptions=self.ss_assumps + self.bb_assumps + [sel]): # filtering satisfied self._filter_satisfied(update_setd=True) else: # clause D is unsatisfiable => all literals are backbones self.bb_assumps.extend([-l for l in cld[:-1]]) self.setd = [] # deactivating clause D self.oracle.add_clause([-sel])
def from_path(cls, path, suffix=''): """ Convenience method to run critic2 analysis on a folder containing typical VASP output files. This method will: 1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped counterparts. 2. If AECCAR* files are present, constructs a temporary reference file as AECCAR0 + AECCAR2 3. Runs critic2 analysis twice: once for charge, and a second time for the charge difference (magnetization density). :param path: path to folder to search in :param suffix: specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz') :return: """ def _get_filepath(filename, warning, path=path, suffix=suffix): paths = glob.glob(os.path.join(path, filename + suffix + '*')) if not paths: warnings.warn(warning) return None if len(paths) > 1: # using reverse=True because, if multiple files are present, # they likely have suffixes 'static', 'relax', 'relax2', etc. # and this would give 'static' over 'relax2' over 'relax' # however, better to use 'suffix' kwarg to avoid this! paths.sort(reverse=True) warnings.warn('Multiple files detected, using {}'.format(os.path.basename(path))) path = paths[0] return path chgcar_path = _get_filepath('CHGCAR', 'Could not find CHGCAR!') chgcar = Chgcar.from_file(chgcar_path) aeccar0_path = _get_filepath('AECCAR0', 'Could not find AECCAR0, interpret Bader results with caution.') aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None aeccar2_path = _get_filepath('AECCAR2', 'Could not find AECCAR2, interpret Bader results with caution.') aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None return cls(chgcar.structure, chgcar, chgcar_ref)
Convenience method to run critic2 analysis on a folder containing typical VASP output files. This method will: 1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped counterparts. 2. If AECCAR* files are present, constructs a temporary reference file as AECCAR0 + AECCAR2 3. Runs critic2 analysis twice: once for charge, and a second time for the charge difference (magnetization density). :param path: path to folder to search in :param suffix: specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz') :return:
Below is the the instruction that describes the task: ### Input: Convenience method to run critic2 analysis on a folder containing typical VASP output files. This method will: 1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped counterparts. 2. If AECCAR* files are present, constructs a temporary reference file as AECCAR0 + AECCAR2 3. Runs critic2 analysis twice: once for charge, and a second time for the charge difference (magnetization density). :param path: path to folder to search in :param suffix: specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz') :return: ### Response: def from_path(cls, path, suffix=''): """ Convenience method to run critic2 analysis on a folder containing typical VASP output files. This method will: 1. Look for files CHGCAR, AECAR0, AECAR2, POTCAR or their gzipped counterparts. 2. If AECCAR* files are present, constructs a temporary reference file as AECCAR0 + AECCAR2 3. Runs critic2 analysis twice: once for charge, and a second time for the charge difference (magnetization density). :param path: path to folder to search in :param suffix: specific suffix to look for (e.g. '.relax1' for 'CHGCAR.relax1.gz') :return: """ def _get_filepath(filename, warning, path=path, suffix=suffix): paths = glob.glob(os.path.join(path, filename + suffix + '*')) if not paths: warnings.warn(warning) return None if len(paths) > 1: # using reverse=True because, if multiple files are present, # they likely have suffixes 'static', 'relax', 'relax2', etc. # and this would give 'static' over 'relax2' over 'relax' # however, better to use 'suffix' kwarg to avoid this! 
paths.sort(reverse=True) warnings.warn('Multiple files detected, using {}'.format(os.path.basename(path))) path = paths[0] return path chgcar_path = _get_filepath('CHGCAR', 'Could not find CHGCAR!') chgcar = Chgcar.from_file(chgcar_path) aeccar0_path = _get_filepath('AECCAR0', 'Could not find AECCAR0, interpret Bader results with caution.') aeccar0 = Chgcar.from_file(aeccar0_path) if aeccar0_path else None aeccar2_path = _get_filepath('AECCAR2', 'Could not find AECCAR2, interpret Bader results with caution.') aeccar2 = Chgcar.from_file(aeccar2_path) if aeccar2_path else None chgcar_ref = aeccar0.linear_add(aeccar2) if (aeccar0 and aeccar2) else None return cls(chgcar.structure, chgcar, chgcar_ref)
def get_ar_count(self): """Return the ar_count request paramteter """ ar_count = 1 try: ar_count = int(self.request.form.get("ar_count", 1)) except (TypeError, ValueError): ar_count = 1 return ar_count
Return the ar_count request paramteter
Below is the the instruction that describes the task: ### Input: Return the ar_count request paramteter ### Response: def get_ar_count(self): """Return the ar_count request paramteter """ ar_count = 1 try: ar_count = int(self.request.form.get("ar_count", 1)) except (TypeError, ValueError): ar_count = 1 return ar_count
def sub_dimension(self, index, dimension, propagate=True, inplace=False): """Return a ChemicalEntity sliced through a dimension. If other dimensions depend on this one those are updated accordingly. """ filter_ = self._propagate_dim(index, dimension, propagate) return self.subindex(filter_, inplace)
Return a ChemicalEntity sliced through a dimension. If other dimensions depend on this one those are updated accordingly.
Below is the the instruction that describes the task: ### Input: Return a ChemicalEntity sliced through a dimension. If other dimensions depend on this one those are updated accordingly. ### Response: def sub_dimension(self, index, dimension, propagate=True, inplace=False): """Return a ChemicalEntity sliced through a dimension. If other dimensions depend on this one those are updated accordingly. """ filter_ = self._propagate_dim(index, dimension, propagate) return self.subindex(filter_, inplace)
def objective(param_scales=(1, 1), xstar=None, seed=None): """Gives objective functions a number of dimensions and parameter range Parameters ---------- param_scales : (int, int) Scale (std. dev.) for choosing each parameter xstar : array_like Optimal parameters """ ndim = len(param_scales) def decorator(func): @wraps(func) def wrapper(theta): return func(theta) def param_init(): np.random.seed(seed) return np.random.randn(ndim,) * np.array(param_scales) wrapper.ndim = ndim wrapper.param_init = param_init wrapper.xstar = xstar return wrapper return decorator
Gives objective functions a number of dimensions and parameter range Parameters ---------- param_scales : (int, int) Scale (std. dev.) for choosing each parameter xstar : array_like Optimal parameters
Below is the the instruction that describes the task: ### Input: Gives objective functions a number of dimensions and parameter range Parameters ---------- param_scales : (int, int) Scale (std. dev.) for choosing each parameter xstar : array_like Optimal parameters ### Response: def objective(param_scales=(1, 1), xstar=None, seed=None): """Gives objective functions a number of dimensions and parameter range Parameters ---------- param_scales : (int, int) Scale (std. dev.) for choosing each parameter xstar : array_like Optimal parameters """ ndim = len(param_scales) def decorator(func): @wraps(func) def wrapper(theta): return func(theta) def param_init(): np.random.seed(seed) return np.random.randn(ndim,) * np.array(param_scales) wrapper.ndim = ndim wrapper.param_init = param_init wrapper.xstar = xstar return wrapper return decorator
def prune_tree_for_supertree(self, ott, to_prune_fsi_set, root_ott_id, taxonomy_treefile=None, id_to_other_prune_reason=None): """ `to_prune_fsi_set` is a set of flag indices to be pruned. """ if id_to_other_prune_reason is None: id_to_other_prune_reason = {} self.prune_to_ingroup() self.prune_unmapped_leaves() other_pruned = set() if id_to_other_prune_reason: id2p = set(id_to_other_prune_reason.keys()).intersection(set(self.by_ott_id.keys())) for ott_id in id2p: reason = id_to_other_prune_reason[ott_id] self.prune_ott_problem_leaves_by_id(ott_id, reason) # Check the stored OTT Ids against the current version of OTT mapped, unrecog, forward2unrecog, pruned, above_root, old2new = ott.map_ott_ids(self.by_ott_id.keys(), to_prune_fsi_set, root_ott_id) for ott_id in unrecog: self.prune_ott_problem_leaves_by_id(ott_id, 'unrecognized_ott_id') for ott_id in forward2unrecog: self.prune_ott_problem_leaves_by_id(ott_id, 'forwarded_to_unrecognized_ott_id') for ott_id in pruned: self.prune_ott_problem_leaves_by_id(ott_id, 'flagged') for ott_id in above_root: self.prune_ott_problem_leaves_by_id(ott_id, 'above_root') for old_id, new_id in old2new.items(): old_node_list = self.by_ott_id[old_id] del self.by_ott_id[old_id] if new_id in self.by_ott_id: v = self.by_ott_id[new_id] v.extend(old_node_list) v.sort() # I think only the last step requires sorting (NEED to check that, # If so, we could move this sort to that point to avoid multiple sortings. else: self.by_ott_id[new_id] = old_node_list for sortable_el in old_node_list: otu = sortable_el[3] assert otu['^ot:ottId'] == old_id otu['^ot:ottId'] = new_id assert '^ot:ottTaxonName' in otu otu['^ot:ottTaxonName'] = ott.get_name(new_id) lost_tips = set(unrecog) lost_tips.update(forward2unrecog) lost_tips.update(pruned) lost_tips.update(other_pruned) # Get the induced tree... 
assert self.root_node_id try: ott_tree = ott.induced_tree(mapped, create_monotypic_nodes=True) except SpikeTreeError: error('SpikeTreeError from mapped ott_id list = {}'.format(', '.join([str(i) for i in mapped]))) raise EmptyTreeError() if taxonomy_treefile is not None: with codecs.open(taxonomy_treefile, 'w', encoding='utf-8') as tto: ott_tree.write_newick(tto) # ... so that we can look for leaves mapped to ancestors of other leaves taxon_contains_other_ott_ids = [] to_retain = [] for ott_id in self.by_ott_id: if ott_id in lost_tips: continue n = old2new.get(ott_id) if n is None: n = ott_id nd = ott_tree.find_node(n) assert nd is not None if nd.children: # nd must be an internal node. # given that the descendants of this node are mapped in a more specific # way, we will prune this ott_id from the tree taxon_contains_other_ott_ids.append(ott_id) else: to_retain.append(ott_id) for ott_id in taxon_contains_other_ott_ids: self.prune_ott_problem_leaves_by_id(ott_id, 'mapped_to_taxon_containing_other_mapped_tips') # finally, we walk through any ott_id's mapped to multiple nodes for ott_id in to_retain: nm = self.by_ott_id[ott_id] if len(nm) > 1: el = nm.pop(0) reason = 'replaced_by_exemplar_node' if (el[0] == -1) else 'replaced_by_arbitrary_node' self.prune_ott_problem_leaves_by_id(ott_id, reason) return self
`to_prune_fsi_set` is a set of flag indices to be pruned.
Below is the the instruction that describes the task: ### Input: `to_prune_fsi_set` is a set of flag indices to be pruned. ### Response: def prune_tree_for_supertree(self, ott, to_prune_fsi_set, root_ott_id, taxonomy_treefile=None, id_to_other_prune_reason=None): """ `to_prune_fsi_set` is a set of flag indices to be pruned. """ if id_to_other_prune_reason is None: id_to_other_prune_reason = {} self.prune_to_ingroup() self.prune_unmapped_leaves() other_pruned = set() if id_to_other_prune_reason: id2p = set(id_to_other_prune_reason.keys()).intersection(set(self.by_ott_id.keys())) for ott_id in id2p: reason = id_to_other_prune_reason[ott_id] self.prune_ott_problem_leaves_by_id(ott_id, reason) # Check the stored OTT Ids against the current version of OTT mapped, unrecog, forward2unrecog, pruned, above_root, old2new = ott.map_ott_ids(self.by_ott_id.keys(), to_prune_fsi_set, root_ott_id) for ott_id in unrecog: self.prune_ott_problem_leaves_by_id(ott_id, 'unrecognized_ott_id') for ott_id in forward2unrecog: self.prune_ott_problem_leaves_by_id(ott_id, 'forwarded_to_unrecognized_ott_id') for ott_id in pruned: self.prune_ott_problem_leaves_by_id(ott_id, 'flagged') for ott_id in above_root: self.prune_ott_problem_leaves_by_id(ott_id, 'above_root') for old_id, new_id in old2new.items(): old_node_list = self.by_ott_id[old_id] del self.by_ott_id[old_id] if new_id in self.by_ott_id: v = self.by_ott_id[new_id] v.extend(old_node_list) v.sort() # I think only the last step requires sorting (NEED to check that, # If so, we could move this sort to that point to avoid multiple sortings. else: self.by_ott_id[new_id] = old_node_list for sortable_el in old_node_list: otu = sortable_el[3] assert otu['^ot:ottId'] == old_id otu['^ot:ottId'] = new_id assert '^ot:ottTaxonName' in otu otu['^ot:ottTaxonName'] = ott.get_name(new_id) lost_tips = set(unrecog) lost_tips.update(forward2unrecog) lost_tips.update(pruned) lost_tips.update(other_pruned) # Get the induced tree... 
assert self.root_node_id try: ott_tree = ott.induced_tree(mapped, create_monotypic_nodes=True) except SpikeTreeError: error('SpikeTreeError from mapped ott_id list = {}'.format(', '.join([str(i) for i in mapped]))) raise EmptyTreeError() if taxonomy_treefile is not None: with codecs.open(taxonomy_treefile, 'w', encoding='utf-8') as tto: ott_tree.write_newick(tto) # ... so that we can look for leaves mapped to ancestors of other leaves taxon_contains_other_ott_ids = [] to_retain = [] for ott_id in self.by_ott_id: if ott_id in lost_tips: continue n = old2new.get(ott_id) if n is None: n = ott_id nd = ott_tree.find_node(n) assert nd is not None if nd.children: # nd must be an internal node. # given that the descendants of this node are mapped in a more specific # way, we will prune this ott_id from the tree taxon_contains_other_ott_ids.append(ott_id) else: to_retain.append(ott_id) for ott_id in taxon_contains_other_ott_ids: self.prune_ott_problem_leaves_by_id(ott_id, 'mapped_to_taxon_containing_other_mapped_tips') # finally, we walk through any ott_id's mapped to multiple nodes for ott_id in to_retain: nm = self.by_ott_id[ott_id] if len(nm) > 1: el = nm.pop(0) reason = 'replaced_by_exemplar_node' if (el[0] == -1) else 'replaced_by_arbitrary_node' self.prune_ott_problem_leaves_by_id(ott_id, reason) return self
def save_xml(xml_str, file_name, pretty=True): """Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed. """ try: fh = open(file_name, 'wt') except IOError: logger.error('Could not open %s for writing.' % file_name) return if pretty: xmld = xml.dom.minidom.parseString(xml_str) xml_str_pretty = xmld.toprettyxml() fh.write(xml_str_pretty) else: fh.write(xml_str) fh.close()
Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed.
Below is the the instruction that describes the task: ### Input: Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed. ### Response: def save_xml(xml_str, file_name, pretty=True): """Save the TRIPS EKB XML in a file. Parameters ---------- xml_str : str The TRIPS EKB XML string to be saved. file_name : str The name of the file to save the result in. pretty : Optional[bool] If True, the XML is pretty printed. """ try: fh = open(file_name, 'wt') except IOError: logger.error('Could not open %s for writing.' % file_name) return if pretty: xmld = xml.dom.minidom.parseString(xml_str) xml_str_pretty = xmld.toprettyxml() fh.write(xml_str_pretty) else: fh.write(xml_str) fh.close()
def deprecated(report): """ The deprecated implementation of report printing. :param report: dict """ warnings.warn("Printing dict-based reports is deprecated. This function " "is included only to support a private development branch " "and may be removed without warning.") for key in report: confidence_name = 'confidence' correctness_name = 'correctness' if confidence_name not in report[key]: confidence_name = 'all_probs' correctness_name = 'correctness_mask' warnings.warn("'all_probs' is used only to temporarily support " "the private development branch. This name can be " "removed at any time without warning.") covered = report[key][confidence_name] > 0.5 wrong = 1. - report[key][correctness_name] failure_rate = (covered * wrong).mean() print(key, 'failure rate at t=.5', failure_rate) print(key, 'accuracy at t=0', report[key][correctness_name].mean())
The deprecated implementation of report printing. :param report: dict
Below is the the instruction that describes the task: ### Input: The deprecated implementation of report printing. :param report: dict ### Response: def deprecated(report): """ The deprecated implementation of report printing. :param report: dict """ warnings.warn("Printing dict-based reports is deprecated. This function " "is included only to support a private development branch " "and may be removed without warning.") for key in report: confidence_name = 'confidence' correctness_name = 'correctness' if confidence_name not in report[key]: confidence_name = 'all_probs' correctness_name = 'correctness_mask' warnings.warn("'all_probs' is used only to temporarily support " "the private development branch. This name can be " "removed at any time without warning.") covered = report[key][confidence_name] > 0.5 wrong = 1. - report[key][correctness_name] failure_rate = (covered * wrong).mean() print(key, 'failure rate at t=.5', failure_rate) print(key, 'accuracy at t=0', report[key][correctness_name].mean())
def getLaplaceCovar(self): """ USES LAPLACE APPROXIMATION TO CALCULATE THE COVARIANCE MATRIX OF THE OPTIMIZED PARAMETERS """ assert self.init, 'GP not initialised' assert self.fast==False, 'Not supported for fast implementation' if self.cache['Sigma']==None: self.cache['Sigma'] = SP.linalg.inv(self.getHessian()) return self.cache['Sigma']
USES LAPLACE APPROXIMATION TO CALCULATE THE COVARIANCE MATRIX OF THE OPTIMIZED PARAMETERS
Below is the the instruction that describes the task: ### Input: USES LAPLACE APPROXIMATION TO CALCULATE THE COVARIANCE MATRIX OF THE OPTIMIZED PARAMETERS ### Response: def getLaplaceCovar(self): """ USES LAPLACE APPROXIMATION TO CALCULATE THE COVARIANCE MATRIX OF THE OPTIMIZED PARAMETERS """ assert self.init, 'GP not initialised' assert self.fast==False, 'Not supported for fast implementation' if self.cache['Sigma']==None: self.cache['Sigma'] = SP.linalg.inv(self.getHessian()) return self.cache['Sigma']
def kegg_mapping_and_metadata(self, kegg_organism_code, custom_gene_mapping=None, outdir=None, set_as_representative=False, force_rerun=False): """Map all genes in the model to KEGG IDs using the KEGG service. Steps: 1. Download all metadata and sequence files in the sequences directory 2. Creates a KEGGProp object in the protein.sequences attribute 3. Returns a Pandas DataFrame of mapping results Args: kegg_organism_code (str): The three letter KEGG code of your organism custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map, custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones. Dictionary keys must match model gene IDs. outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences force_rerun (bool): If you want to overwrite any existing mappings and files """ # First map all of the organism's KEGG genes to UniProt kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot') successfully_mapped_counter = 0 for g in tqdm(self.genes): if custom_gene_mapping: kegg_g = custom_gene_mapping[g.id] else: kegg_g = g.id if kegg_g not in kegg_to_uniprot: log.debug('{}: unable to map to KEGG'.format(g.id)) continue # Download both FASTA and KEGG metadata files kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code, download=True, outdir=outdir, set_as_representative=set_as_representative, force_rerun=force_rerun) # Update potentially old UniProt ID if kegg_g in kegg_to_uniprot.keys(): kegg_prop.uniprot = kegg_to_uniprot[kegg_g] if g.protein.representative_sequence: if g.protein.representative_sequence.kegg == kegg_prop.kegg: g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g] # Keep track of missing mappings - missing is defined by no available sequence if 
kegg_prop.sequence_file: successfully_mapped_counter += 1 log.debug('{}: loaded KEGG information for gene'.format(g.id)) log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes))) log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
Map all genes in the model to KEGG IDs using the KEGG service. Steps: 1. Download all metadata and sequence files in the sequences directory 2. Creates a KEGGProp object in the protein.sequences attribute 3. Returns a Pandas DataFrame of mapping results Args: kegg_organism_code (str): The three letter KEGG code of your organism custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map, custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones. Dictionary keys must match model gene IDs. outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences force_rerun (bool): If you want to overwrite any existing mappings and files
Below is the the instruction that describes the task: ### Input: Map all genes in the model to KEGG IDs using the KEGG service. Steps: 1. Download all metadata and sequence files in the sequences directory 2. Creates a KEGGProp object in the protein.sequences attribute 3. Returns a Pandas DataFrame of mapping results Args: kegg_organism_code (str): The three letter KEGG code of your organism custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map, custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones. Dictionary keys must match model gene IDs. outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences force_rerun (bool): If you want to overwrite any existing mappings and files ### Response: def kegg_mapping_and_metadata(self, kegg_organism_code, custom_gene_mapping=None, outdir=None, set_as_representative=False, force_rerun=False): """Map all genes in the model to KEGG IDs using the KEGG service. Steps: 1. Download all metadata and sequence files in the sequences directory 2. Creates a KEGGProp object in the protein.sequences attribute 3. Returns a Pandas DataFrame of mapping results Args: kegg_organism_code (str): The three letter KEGG code of your organism custom_gene_mapping (dict): If your model genes differ from the gene IDs you want to map, custom_gene_mapping allows you to input a dictionary which maps model gene IDs to new ones. Dictionary keys must match model gene IDs. 
outdir (str): Path to output directory of downloaded files, must be set if GEM-PRO directories were not created initially set_as_representative (bool): If mapped KEGG IDs should be set as representative sequences force_rerun (bool): If you want to overwrite any existing mappings and files """ # First map all of the organism's KEGG genes to UniProt kegg_to_uniprot = ssbio.databases.kegg.map_kegg_all_genes(organism_code=kegg_organism_code, target_db='uniprot') successfully_mapped_counter = 0 for g in tqdm(self.genes): if custom_gene_mapping: kegg_g = custom_gene_mapping[g.id] else: kegg_g = g.id if kegg_g not in kegg_to_uniprot: log.debug('{}: unable to map to KEGG'.format(g.id)) continue # Download both FASTA and KEGG metadata files kegg_prop = g.protein.load_kegg(kegg_id=kegg_g, kegg_organism_code=kegg_organism_code, download=True, outdir=outdir, set_as_representative=set_as_representative, force_rerun=force_rerun) # Update potentially old UniProt ID if kegg_g in kegg_to_uniprot.keys(): kegg_prop.uniprot = kegg_to_uniprot[kegg_g] if g.protein.representative_sequence: if g.protein.representative_sequence.kegg == kegg_prop.kegg: g.protein.representative_sequence.uniprot = kegg_to_uniprot[kegg_g] # Keep track of missing mappings - missing is defined by no available sequence if kegg_prop.sequence_file: successfully_mapped_counter += 1 log.debug('{}: loaded KEGG information for gene'.format(g.id)) log.info('{}/{}: number of genes mapped to KEGG'.format(successfully_mapped_counter, len(self.genes))) log.info('Completed ID mapping --> KEGG. See the "df_kegg_metadata" attribute for a summary dataframe.')
def new_backup(self, src): """ Create a new backup file allocation """ backup_id_file = p.join(self.backup_dir, '.bk_idx') backup_num = file_or_default(backup_id_file, 1, int) backup_name = str(backup_num) + "_" + os.path.basename(src) backup_num += 1 file_put_contents(backup_id_file, str(backup_num)) return p.join(self.backup_dir, backup_name)
Create a new backup file allocation
Below is the the instruction that describes the task: ### Input: Create a new backup file allocation ### Response: def new_backup(self, src): """ Create a new backup file allocation """ backup_id_file = p.join(self.backup_dir, '.bk_idx') backup_num = file_or_default(backup_id_file, 1, int) backup_name = str(backup_num) + "_" + os.path.basename(src) backup_num += 1 file_put_contents(backup_id_file, str(backup_num)) return p.join(self.backup_dir, backup_name)
def header_encode_lines(self, string, maxlengths): """Header-encode a string by converting it first to bytes. This is similar to `header_encode()` except that the string is fit into maximum line lengths as given by the argument. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :param maxlengths: Maximum line length iterator. Each element returned from this iterator will provide the next maximum line length. This parameter is used as an argument to built-in next() and should never be exhausted. The maximum line lengths should not count the RFC 2047 chrome. These line lengths are only a hint; the splitter does the best it can. :return: Lines of encoded strings, each with RFC 2047 chrome. """ # See which encoding we should use. codec = self.output_codec or 'us-ascii' header_bytes = _encode(string, codec) encoder_module = self._get_encoder(header_bytes) encoder = partial(encoder_module.header_encode, charset=codec) # Calculate the number of characters that the RFC 2047 chrome will # contribute to each line. charset = self.get_output_charset() extra = len(charset) + RFC2047_CHROME_LEN # Now comes the hard part. We must encode bytes but we can't split on # bytes because some character sets are variable length and each # encoded word must stand on its own. So the problem is you have to # encode to bytes to figure out this word's length, but you must split # on characters. This causes two problems: first, we don't know how # many octets a specific substring of unicode characters will get # encoded to, and second, we don't know how many ASCII characters # those octets will get encoded to. Unless we try it. Which seems # inefficient. In the interest of being correct rather than fast (and # in the hope that there will be few encoded headers in any such # message), brute force it. 
:( lines = [] current_line = [] maxlen = next(maxlengths) - extra for character in string: current_line.append(character) this_line = EMPTYSTRING.join(current_line) length = encoder_module.header_length(_encode(this_line, charset)) if length > maxlen: # This last character doesn't fit so pop it off. current_line.pop() # Does nothing fit on the first line? if not lines and not current_line: lines.append(None) else: separator = (' ' if lines else '') joined_line = EMPTYSTRING.join(current_line) header_bytes = _encode(joined_line, codec) lines.append(encoder(header_bytes)) current_line = [character] maxlen = next(maxlengths) - extra joined_line = EMPTYSTRING.join(current_line) header_bytes = _encode(joined_line, codec) lines.append(encoder(header_bytes)) return lines
Header-encode a string by converting it first to bytes. This is similar to `header_encode()` except that the string is fit into maximum line lengths as given by the argument. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :param maxlengths: Maximum line length iterator. Each element returned from this iterator will provide the next maximum line length. This parameter is used as an argument to built-in next() and should never be exhausted. The maximum line lengths should not count the RFC 2047 chrome. These line lengths are only a hint; the splitter does the best it can. :return: Lines of encoded strings, each with RFC 2047 chrome.
Below is the the instruction that describes the task: ### Input: Header-encode a string by converting it first to bytes. This is similar to `header_encode()` except that the string is fit into maximum line lengths as given by the argument. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :param maxlengths: Maximum line length iterator. Each element returned from this iterator will provide the next maximum line length. This parameter is used as an argument to built-in next() and should never be exhausted. The maximum line lengths should not count the RFC 2047 chrome. These line lengths are only a hint; the splitter does the best it can. :return: Lines of encoded strings, each with RFC 2047 chrome. ### Response: def header_encode_lines(self, string, maxlengths): """Header-encode a string by converting it first to bytes. This is similar to `header_encode()` except that the string is fit into maximum line lengths as given by the argument. :param string: A unicode string for the header. It must be possible to encode this string to bytes using the character set's output codec. :param maxlengths: Maximum line length iterator. Each element returned from this iterator will provide the next maximum line length. This parameter is used as an argument to built-in next() and should never be exhausted. The maximum line lengths should not count the RFC 2047 chrome. These line lengths are only a hint; the splitter does the best it can. :return: Lines of encoded strings, each with RFC 2047 chrome. """ # See which encoding we should use. codec = self.output_codec or 'us-ascii' header_bytes = _encode(string, codec) encoder_module = self._get_encoder(header_bytes) encoder = partial(encoder_module.header_encode, charset=codec) # Calculate the number of characters that the RFC 2047 chrome will # contribute to each line. 
charset = self.get_output_charset() extra = len(charset) + RFC2047_CHROME_LEN # Now comes the hard part. We must encode bytes but we can't split on # bytes because some character sets are variable length and each # encoded word must stand on its own. So the problem is you have to # encode to bytes to figure out this word's length, but you must split # on characters. This causes two problems: first, we don't know how # many octets a specific substring of unicode characters will get # encoded to, and second, we don't know how many ASCII characters # those octets will get encoded to. Unless we try it. Which seems # inefficient. In the interest of being correct rather than fast (and # in the hope that there will be few encoded headers in any such # message), brute force it. :( lines = [] current_line = [] maxlen = next(maxlengths) - extra for character in string: current_line.append(character) this_line = EMPTYSTRING.join(current_line) length = encoder_module.header_length(_encode(this_line, charset)) if length > maxlen: # This last character doesn't fit so pop it off. current_line.pop() # Does nothing fit on the first line? if not lines and not current_line: lines.append(None) else: separator = (' ' if lines else '') joined_line = EMPTYSTRING.join(current_line) header_bytes = _encode(joined_line, codec) lines.append(encoder(header_bytes)) current_line = [character] maxlen = next(maxlengths) - extra joined_line = EMPTYSTRING.join(current_line) header_bytes = _encode(joined_line, codec) lines.append(encoder(header_bytes)) return lines
def create_linked_data_element(self, url, kind, id=None, # pylint: disable=W0622 relation=None, title=None): """ Returns a new linked data element for the given url and kind. :param str url: URL to assign to the linked data element. :param str kind: kind of the resource that is linked. One of the constantes defined by :class:`everest.constants.RESOURCE_KINDS`. :returns: object implementing :class:`ILinkedDataElement`. """ mp = self.__mp_reg.find_or_create_mapping(Link) return mp.data_element_class.create(url, kind, id=id, relation=relation, title=title)
Returns a new linked data element for the given url and kind. :param str url: URL to assign to the linked data element. :param str kind: kind of the resource that is linked. One of the constantes defined by :class:`everest.constants.RESOURCE_KINDS`. :returns: object implementing :class:`ILinkedDataElement`.
Below is the the instruction that describes the task: ### Input: Returns a new linked data element for the given url and kind. :param str url: URL to assign to the linked data element. :param str kind: kind of the resource that is linked. One of the constantes defined by :class:`everest.constants.RESOURCE_KINDS`. :returns: object implementing :class:`ILinkedDataElement`. ### Response: def create_linked_data_element(self, url, kind, id=None, # pylint: disable=W0622 relation=None, title=None): """ Returns a new linked data element for the given url and kind. :param str url: URL to assign to the linked data element. :param str kind: kind of the resource that is linked. One of the constantes defined by :class:`everest.constants.RESOURCE_KINDS`. :returns: object implementing :class:`ILinkedDataElement`. """ mp = self.__mp_reg.find_or_create_mapping(Link) return mp.data_element_class.create(url, kind, id=id, relation=relation, title=title)
def download_and_extract(path, url, input_filename, target_filename): """Extract files from downloaded compressed archive file. Args: path: string directory where the files will be downloaded url: url containing the compressed input and target files input_filename: name of file containing data in source language target_filename: name of file containing data in target language Returns: Full paths to extracted input and target files. Raises: OSError: if the the download/extraction fails. """ logging.info('Downloading and extracting data to: %s' % path) # Check if extracted files already exist in path input_file = find_file(path, input_filename) target_file = find_file(path, target_filename) if input_file and target_file: logging.info("Already downloaded and extracted %s." % url) return input_file, target_file # Download archive file if it doesn't already exist. compressed_file = download_from_url(path, url) # Extract compressed files logging.info("Extracting %s." % compressed_file) with tarfile.open(compressed_file, "r:gz") as corpus_tar: corpus_tar.extractall(path) # Return filepaths of the requested files. input_file = find_file(path, input_filename) target_file = find_file(path, target_filename) if input_file and target_file: return input_file, target_file raise OSError("Download/extraction failed for url %s to path %s" % (url, path))
Extract files from downloaded compressed archive file. Args: path: string directory where the files will be downloaded url: url containing the compressed input and target files input_filename: name of file containing data in source language target_filename: name of file containing data in target language Returns: Full paths to extracted input and target files. Raises: OSError: if the the download/extraction fails.
Below is the the instruction that describes the task: ### Input: Extract files from downloaded compressed archive file. Args: path: string directory where the files will be downloaded url: url containing the compressed input and target files input_filename: name of file containing data in source language target_filename: name of file containing data in target language Returns: Full paths to extracted input and target files. Raises: OSError: if the the download/extraction fails. ### Response: def download_and_extract(path, url, input_filename, target_filename): """Extract files from downloaded compressed archive file. Args: path: string directory where the files will be downloaded url: url containing the compressed input and target files input_filename: name of file containing data in source language target_filename: name of file containing data in target language Returns: Full paths to extracted input and target files. Raises: OSError: if the the download/extraction fails. """ logging.info('Downloading and extracting data to: %s' % path) # Check if extracted files already exist in path input_file = find_file(path, input_filename) target_file = find_file(path, target_filename) if input_file and target_file: logging.info("Already downloaded and extracted %s." % url) return input_file, target_file # Download archive file if it doesn't already exist. compressed_file = download_from_url(path, url) # Extract compressed files logging.info("Extracting %s." % compressed_file) with tarfile.open(compressed_file, "r:gz") as corpus_tar: corpus_tar.extractall(path) # Return filepaths of the requested files. input_file = find_file(path, input_filename) target_file = find_file(path, target_filename) if input_file and target_file: return input_file, target_file raise OSError("Download/extraction failed for url %s to path %s" % (url, path))
def _sync_directories(from_directory, to_directory): """Sync to_directory with from_directory by copying each file in to_directory with new contents. Files in to_directory will be overwritten by files of the same name in from_directory. We need to keep two copies of the log directory because otherwise TensorBoard picks up temp files from `aws s3 sync` and then stops reading the correct tfevent files. We walk the directory and copy each file individually because the directory that TensorBoard watches needs to always exist. Args: from_directory (str): The directory with updated files. to_directory (str): The directory to be synced. """ if not os.path.exists(to_directory): os.mkdir(to_directory) for root, dirs, files in os.walk(from_directory): to_root = root.replace(from_directory, to_directory) for directory in dirs: to_child_dir = os.path.join(to_root, directory) if not os.path.exists(to_child_dir): os.mkdir(to_child_dir) for fname in files: from_file = os.path.join(root, fname) to_file = os.path.join(to_root, fname) with open(from_file, 'rb') as a, open(to_file, 'wb') as b: b.write(a.read())
Sync to_directory with from_directory by copying each file in to_directory with new contents. Files in to_directory will be overwritten by files of the same name in from_directory. We need to keep two copies of the log directory because otherwise TensorBoard picks up temp files from `aws s3 sync` and then stops reading the correct tfevent files. We walk the directory and copy each file individually because the directory that TensorBoard watches needs to always exist. Args: from_directory (str): The directory with updated files. to_directory (str): The directory to be synced.
Below is the the instruction that describes the task: ### Input: Sync to_directory with from_directory by copying each file in to_directory with new contents. Files in to_directory will be overwritten by files of the same name in from_directory. We need to keep two copies of the log directory because otherwise TensorBoard picks up temp files from `aws s3 sync` and then stops reading the correct tfevent files. We walk the directory and copy each file individually because the directory that TensorBoard watches needs to always exist. Args: from_directory (str): The directory with updated files. to_directory (str): The directory to be synced. ### Response: def _sync_directories(from_directory, to_directory): """Sync to_directory with from_directory by copying each file in to_directory with new contents. Files in to_directory will be overwritten by files of the same name in from_directory. We need to keep two copies of the log directory because otherwise TensorBoard picks up temp files from `aws s3 sync` and then stops reading the correct tfevent files. We walk the directory and copy each file individually because the directory that TensorBoard watches needs to always exist. Args: from_directory (str): The directory with updated files. to_directory (str): The directory to be synced. """ if not os.path.exists(to_directory): os.mkdir(to_directory) for root, dirs, files in os.walk(from_directory): to_root = root.replace(from_directory, to_directory) for directory in dirs: to_child_dir = os.path.join(to_root, directory) if not os.path.exists(to_child_dir): os.mkdir(to_child_dir) for fname in files: from_file = os.path.join(root, fname) to_file = os.path.join(to_root, fname) with open(from_file, 'rb') as a, open(to_file, 'wb') as b: b.write(a.read())
def regexp(__string: str, __pattern: str, __repl: Union[Callable, str], *,
           count: int = 0, flags: int = 0) -> str:
    """Jinja filter for regexp replacements.

    See :func:`re.sub` for documentation.

    Args:
        __string: Text to perform substitutions on.
        __pattern: Regular expression to match.
        __repl: Replacement string, or a callable invoked per match.
        count: Maximum number of substitutions (``0`` means replace all).
        flags: Regex flags (e.g. ``re.IGNORECASE``).

    Returns:
        Text with substitutions applied
    """
    # Pass count/flags as keywords: positional use is deprecated and was
    # removed from re.sub in Python 3.13.
    return re.sub(__pattern, __repl, __string, count=count, flags=flags)
Jinja filter for regexp replacements. See :func:`re.sub` for documentation. Returns: Text with substitutions applied
Below is the the instruction that describes the task: ### Input: Jinja filter for regexp replacements. See :func:`re.sub` for documentation. Returns: Text with substitutions applied ### Response: def regexp(__string: str, __pattern: str, __repl: Union[Callable, str], *, count: int = 0, flags: int = 0) -> str: """Jinja filter for regexp replacements. See :func:`re.sub` for documentation. Returns: Text with substitutions applied """ return re.sub(__pattern, __repl, __string, count, flags)
def engage(self, **kwargs):
    '''
    Move the magnet to either:
        the default height for the labware loaded on magdeck
            [engage()]
        or  a +/- 'offset' from the default height for the labware
            [engage(offset=2)]
        or  a 'height' value specified as mm from magdeck home position
            [engage(height=20)]

    Raises:
        ValueError: if no default height is known for the loaded labware
            (and no explicit height was given), or the resulting height is
            outside [0, MAX_ENGAGE_HEIGHT].
    '''
    if 'height' in kwargs:
        height = kwargs.get('height')
    else:
        # Look up the default engage height for the loaded labware.
        labware_name = self.labware.get_children_list()[1].get_name()
        height = LABWARE_ENGAGE_HEIGHT.get(labware_name)
        if not height:
            # Fixed message: the two string fragments previously joined
            # without a space ("Provide acustom height instead").
            raise ValueError(
                'No engage height definition found for {}. Provide a '
                'custom height instead'.format(labware_name))
    if 'offset' in kwargs:
        height += kwargs.get('offset')
    if height > MAX_ENGAGE_HEIGHT or height < 0:
        raise ValueError('Invalid engage height. Should be 0 to {}'.format(
            MAX_ENGAGE_HEIGHT))
    if self._driver and self._driver.is_connected():
        self._driver.move(height)
    # NOTE(review): _engaged is set even when no driver is connected --
    # preserved from the original behavior.
    self._engaged = True
Move the magnet to either: the default height for the labware loaded on magdeck [engage()] or a +/- 'offset' from the default height for the labware [engage(offset=2)] or a 'height' value specified as mm from magdeck home position [engage(height=20)]
Below is the the instruction that describes the task: ### Input: Move the magnet to either: the default height for the labware loaded on magdeck [engage()] or a +/- 'offset' from the default height for the labware [engage(offset=2)] or a 'height' value specified as mm from magdeck home position [engage(height=20)] ### Response: def engage(self, **kwargs): ''' Move the magnet to either: the default height for the labware loaded on magdeck [engage()] or a +/- 'offset' from the default height for the labware [engage(offset=2)] or a 'height' value specified as mm from magdeck home position [engage(height=20)] ''' if 'height' in kwargs: height = kwargs.get('height') else: height = LABWARE_ENGAGE_HEIGHT.get( self.labware.get_children_list()[1].get_name()) if not height: raise ValueError( 'No engage height definition found for {}. Provide a' 'custom height instead'.format( self.labware.get_children_list()[1].get_name())) if 'offset' in kwargs: height += kwargs.get('offset') if height > MAX_ENGAGE_HEIGHT or height < 0: raise ValueError('Invalid engage height. Should be 0 to {}'.format( MAX_ENGAGE_HEIGHT)) if self._driver and self._driver.is_connected(): self._driver.move(height) self._engaged = True
def get_unit(unit_id, **kwargs):
    """
    Returns a single unit
    """
    unit_query = db.DBSession.query(Unit).filter(Unit.id == unit_id)
    try:
        matched_unit = unit_query.one()
    except NoResultFound:
        # No unit with this id exists in the database
        raise ResourceNotFoundError("Unit %s not found"%(unit_id))
    return JSONObject(matched_unit)
Returns a single unit
Below is the the instruction that describes the task: ### Input: Returns a single unit ### Response: def get_unit(unit_id, **kwargs): """ Returns a single unit """ try: unit = db.DBSession.query(Unit).filter(Unit.id==unit_id).one() return JSONObject(unit) except NoResultFound: # The dimension does not exist raise ResourceNotFoundError("Unit %s not found"%(unit_id))
def forum_post_create(self, topic_id, body):
    """Create a forum post (Requires login).

    Parameters:
        topic_id (int):
        body (str): Post content.
    """
    payload = {
        'forum_post[topic_id]': topic_id,
        'forum_post[body]': body,
    }
    return self._get('forum_posts.json', payload, method='POST', auth=True)
Create a forum post (Requires login). Parameters: topic_id (int): body (str): Post content.
Below is the the instruction that describes the task: ### Input: Create a forum post (Requires login). Parameters: topic_id (int): body (str): Post content. ### Response: def forum_post_create(self, topic_id, body): """Create a forum post (Requires login). Parameters: topic_id (int): body (str): Post content. """ params = {'forum_post[topic_id]': topic_id, 'forum_post[body]': body} return self._get('forum_posts.json', params, method='POST', auth=True)
def query(conn_type, option, post_data=None):
    '''
    Execute the HTTP request to the API

    :param conn_type: HTTP verb to use: 'get', 'post', 'put' or 'delete'.
    :param option: API path appended to /api2/json/.
    :param post_data: form-encoded body for post/put/delete requests.
    :return: the 'data' member of the JSON response, or None if the
        response could not be parsed (errors are logged).
    '''
    if ticket is None or csrf is None or url is None:
        log.debug('Not authenticated yet, doing that now..')
        _authenticate()

    full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option)

    log.debug('%s: %s (%s)', conn_type, full_url, post_data)

    httpheaders = {'Accept': 'application/json',
                   'Content-Type': 'application/x-www-form-urlencoded',
                   'User-Agent': 'salt-cloud-proxmox'}

    if conn_type == 'get':
        # GET requests carry no body, no custom headers and no CSRF token.
        response = requests.get(full_url, verify=verify_ssl, cookies=ticket)
    else:
        # post/put/delete only differ by verb; all need the CSRF token.
        verbs = {'post': requests.post,
                 'put': requests.put,
                 'delete': requests.delete}
        httpheaders['CSRFPreventionToken'] = csrf
        response = verbs[conn_type](full_url,
                                    verify=verify_ssl,
                                    data=post_data,
                                    cookies=ticket,
                                    headers=httpheaders)

    response.raise_for_status()

    try:
        returned_data = response.json()
        if 'data' not in returned_data:
            raise SaltCloudExecutionFailure
        return returned_data['data']
    except Exception:
        # Deliberately best-effort: log and fall through (returns None),
        # matching the original behavior.
        log.error('Error in trying to process JSON')
        log.error(response)
Execute the HTTP request to the API
Below is the the instruction that describes the task: ### Input: Execute the HTTP request to the API ### Response: def query(conn_type, option, post_data=None): ''' Execute the HTTP request to the API ''' if ticket is None or csrf is None or url is None: log.debug('Not authenticated yet, doing that now..') _authenticate() full_url = 'https://{0}:{1}/api2/json/{2}'.format(url, port, option) log.debug('%s: %s (%s)', conn_type, full_url, post_data) httpheaders = {'Accept': 'application/json', 'Content-Type': 'application/x-www-form-urlencoded', 'User-Agent': 'salt-cloud-proxmox'} if conn_type == 'post': httpheaders['CSRFPreventionToken'] = csrf response = requests.post(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'put': httpheaders['CSRFPreventionToken'] = csrf response = requests.put(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'delete': httpheaders['CSRFPreventionToken'] = csrf response = requests.delete(full_url, verify=verify_ssl, data=post_data, cookies=ticket, headers=httpheaders) elif conn_type == 'get': response = requests.get(full_url, verify=verify_ssl, cookies=ticket) response.raise_for_status() try: returned_data = response.json() if 'data' not in returned_data: raise SaltCloudExecutionFailure return returned_data['data'] except Exception: log.error('Error in trying to process JSON') log.error(response)
def subscriptions_unread(self, room_id, **kwargs):
    """Mark messages as unread by roomId or from a message"""
    return self.__call_api_post(
        'subscriptions.unread',
        roomId=room_id,
        kwargs=kwargs,
    )
Mark messages as unread by roomId or from a message
Below is the the instruction that describes the task: ### Input: Mark messages as unread by roomId or from a message ### Response: def subscriptions_unread(self, room_id, **kwargs): """Mark messages as unread by roomId or from a message""" return self.__call_api_post('subscriptions.unread', roomId=room_id, kwargs=kwargs)
def get_decor(self, c, match_only=None):
    """
    Get the decor for a component.

    Args:
        c (component): The component to look up. Anything that is not a
            ``Component`` instance is treated as a curve-like object and
            matched by its ``mnemonic`` attribute instead.
        match_only (list of str): The component attributes to include in the
            comparison. Default: All of them.

    Returns:
        Decor. The matching Decor from the Legend, or a default light-grey
        Decor with an empty Component if no match is found.
    """
    if isinstance(c, Component):
        if c:
            if match_only:
                # Filter the component only those attributes
                c = Component({k: getattr(c, k, None) for k in match_only})
            for decor in self.__list:
                try:
                    # Component equality decides the match; entries whose
                    # comparison raises AttributeError are skipped.
                    if c == decor.component:
                        return decor
                except AttributeError:
                    continue
    else:
        # Non-Component lookup: compare mnemonics case-insensitively.
        # Entries without a curve/mnemonic are skipped via AttributeError.
        for decor in self.__list:
            try:
                if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic:
                    return decor
            except AttributeError:
                continue
    # Fallback: a neutral grey decor rather than None, so callers can
    # always plot something.
    return Decor({'colour': '#eeeeee', 'component': Component()})
Get the decor for a component. Args: c (component): The component to look up. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: Decor. The matching Decor from the Legend, or None if not found.
Below is the the instruction that describes the task: ### Input: Get the decor for a component. Args: c (component): The component to look up. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: Decor. The matching Decor from the Legend, or None if not found. ### Response: def get_decor(self, c, match_only=None): """ Get the decor for a component. Args: c (component): The component to look up. match_only (list of str): The component attributes to include in the comparison. Default: All of them. Returns: Decor. The matching Decor from the Legend, or None if not found. """ if isinstance(c, Component): if c: if match_only: # Filter the component only those attributes c = Component({k: getattr(c, k, None) for k in match_only}) for decor in self.__list: try: if c == decor.component: return decor except AttributeError: continue else: for decor in self.__list: try: if getattr(c, 'mnemonic').lower() == decor.curve.mnemonic: return decor except AttributeError: continue return Decor({'colour': '#eeeeee', 'component': Component()})
def charge(self, code, each_amount, quantity=1, description=None):
    '''
    Add an arbitrary charge or credit to a customer's account.

    A positive number will create a charge.  A negative number will
    create a credit.

    ``each_amount`` is normalized to a ``Decimal`` with a precision of 2,
    as that is the level of precision which the cheddar API supports.
    '''
    # Normalize to two decimal places before formatting for the API.
    amount = Decimal(each_amount).quantize(Decimal('.01'))

    data = {
        'chargeCode': code,
        'eachAmount': '%.2f' % amount,
        'quantity': quantity,
    }
    if description:
        data['description'] = description

    response = self.product.client.make_request(
        path='customers/add-charge',
        params={'code': self.code},
        data=data,
    )
    return self.load_data_from_xml(response.content)
Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports.
Below is the the instruction that describes the task: ### Input: Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports. ### Response: def charge(self, code, each_amount, quantity=1, description=None): ''' Add an arbitrary charge or credit to a customer's account. A positive number will create a charge. A negative number will create a credit. each_amount is normalized to a Decimal with a precision of 2 as that is the level of precision which the cheddar API supports. ''' each_amount = Decimal(each_amount) each_amount = each_amount.quantize(Decimal('.01')) data = { 'chargeCode': code, 'eachAmount': '%.2f' % each_amount, 'quantity': quantity, } if description: data['description'] = description response = self.product.client.make_request( path='customers/add-charge', params={'code': self.code}, data=data, ) return self.load_data_from_xml(response.content)
def rotational_stiffness(sl, fd, axis="length", a0=0.0, **kwargs):
    """
    Rotation stiffness of foundation.

    :param sl: Soil object; must provide ``g_mod`` and ``poissons_ratio``.
    :param fd: Foundation object; must provide ``length``, ``width``,
        ``depth`` and the second moment of area for the chosen axis
        (``i_ll`` for "length", ``i_ww`` otherwise).
    :param axis: The axis which it should be computed around
        ("length" or anything else for the width axis).
    :param a0: Dimensionless frequency used in the dynamic correction
        factor (``a0=0`` gives the static stiffness).
    :return: Rotational stiffness value.
    """
    if not kwargs.get("disable_requires", False):
        gf.models.check_required(sl, ["g_mod", "poissons_ratio"])
        gf.models.check_required(fd, ["length", "width", "depth"])
    # NOTE(review): embedment (fd.depth) is not accounted for; the
    # previous version contained only a dead `if fd.depth > 0.0: pass`
    # placeholder, which has been removed.
    l = fd.length * 0.5  # half-length
    b = fd.width * 0.5  # half-width
    v = sl.poissons_ratio
    if axis == "length":
        i_bx = fd.i_ll
        k_rx = 1 - 0.2 * a0  # dynamic correction factor
        k_f_0 = (sl.g_mod / (1 - v) * i_bx ** 0.75 * (l / b) ** 0.25 *
                 (2.4 + 0.5 * (b / l))) * k_rx
    else:
        i_by = fd.i_ww
        k_ry = 1 - 0.3 * a0  # dynamic correction factor
        k_f_0 = (sl.g_mod / (1 - v) * i_by ** 0.75 *
                 (3 * (l / b) ** 0.15)) * k_ry
    return k_f_0
Rotation stiffness of foundation. :param fd: Foundation object :param sl: Soil Object. :param axis: The axis which it should be computed around :return:
Below is the the instruction that describes the task: ### Input: Rotation stiffness of foundation. :param fd: Foundation object :param sl: Soil Object. :param axis: The axis which it should be computed around :return: ### Response: def rotational_stiffness(sl, fd, axis="length", a0=0.0, **kwargs): """ Rotation stiffness of foundation. :param fd: Foundation object :param sl: Soil Object. :param axis: The axis which it should be computed around :return: """ if not kwargs.get("disable_requires", False): gf.models.check_required(sl, ["g_mod", "poissons_ratio"]) gf.models.check_required(fd, ["length", "width", "depth"]) if fd.depth > 0.0: pass l = fd.length * 0.5 b = fd.width * 0.5 v = sl.poissons_ratio if axis == "length": i_bx = fd.i_ll k_rx = 1 - 0.2 * a0 k_f_0 = (sl.g_mod / (1 - v) * i_bx ** 0.75 * (l / b) ** 0.25 * (2.4 + 0.5 * (b / l))) * k_rx else: i_by = fd.i_ww k_ry = 1 - 0.3 * a0 k_f_0 = (sl.g_mod / (1 - v) * i_by ** 0.75 * (3 * (l / b) ** 0.15)) * k_ry return k_f_0
def generate_password(self) -> list:
    """Generate a list of random characters.

    Draws ``self.passwordlen`` characters from the configured character
    set, stores the result in ``self.last_result`` and returns it.

    Raises:
        ValueError: if the character set is empty or ``passwordlen`` is
            unset.
    """
    charset = self._get_password_characters()
    if self.passwordlen is None or not charset:
        raise ValueError("Can't generate password: character set is "
                         "empty or passwordlen isn't set")
    password = [randchoice(charset) for _ in range(self.passwordlen)]
    self.last_result = password
    return password
Generate a list of random characters.
Below is the the instruction that describes the task: ### Input: Generate a list of random characters. ### Response: def generate_password(self) -> list: """Generate a list of random characters.""" characterset = self._get_password_characters() if ( self.passwordlen is None or not characterset ): raise ValueError("Can't generate password: character set is " "empty or passwordlen isn't set") password = [] for _ in range(0, self.passwordlen): password.append(randchoice(characterset)) self.last_result = password return password
def seek_file_end(file):
    '''Seek to the end of the file.'''
    try:
        file.seek(0, 2)
        return
    except ValueError:
        # Some streams (e.g. gzip) reject seeking relative to the end.
        pass
    # Fallback: consume the stream until it is exhausted.
    while file.read(4096):
        pass
Seek to the end of the file.
Below is the the instruction that describes the task: ### Input: Seek to the end of the file. ### Response: def seek_file_end(file): '''Seek to the end of the file.''' try: file.seek(0, 2) except ValueError: # gzip files don't support seek from end while True: data = file.read(4096) if not data: break
def refresh_schema(self, exclude_system_tables=True, use_cache=False):
    """
    Pulls your database's schema again and looks for any new tables and
    columns.

    Args:
        exclude_system_tables (bool): skip system/catalog tables when
            gathering metadata.
        use_cache (bool): load table/key metadata from the cached
            ``table_meta`` instead of querying the database.
    """
    col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache)
    tables = self._gen_tables_from_col_tuples(col_meta)

    # Three modes for refreshing schema
    # 1. load directly from cache
    # 2. use a single query for getting all key relationships
    # 3. use the naive approach
    if use_cache:
        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates,
                                       table_meta[t]['schema'], t, tables[t],
                                       keys_per_column=self.keys_per_column,
                                       foreign_keys=table_meta[t]['foreign_keys']['columns'],
                                       ref_keys=table_meta[t]['ref_keys']['columns'])
                                 for t in sorted(tables.keys())])
    # optimize the foreign/ref key query by doing it one time, database-wide, if query is available
    elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str):
        # Fetch every foreign-key relationship in one query and bucket
        # the rows by table name.
        self.cur.execute(self._query_templates['system']['foreign_keys_for_db'])
        table_db_foreign_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_foreign_keys[rel[1]].append(rel)

        self.cur.execute(self._query_templates['system']['ref_keys_for_db'])
        table_db_ref_keys = defaultdict(list)
        for rel in self.cur:
            # second value in relationship tuple is the table name
            table_db_ref_keys[rel[1]].append(rel)

        # generate our Tables, and load them into a TableSet
        self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t,
                                       tables[t], keys_per_column=self.keys_per_column,
                                       foreign_keys=table_db_foreign_keys[t],
                                       ref_keys=table_db_ref_keys[t])
                                 for t in sorted(tables.keys())])
    elif not use_cache:
        # Naive mode: each Table resolves its own keys lazily.
        self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema,
                                       t, tables[t], keys_per_column=self.keys_per_column)
                                 for t in sorted(tables.keys())])

    sys.stderr.write("done!\n")
Pulls your database's schema again and looks for any new tables and columns.
Below is the the instruction that describes the task: ### Input: Pulls your database's schema again and looks for any new tables and columns. ### Response: def refresh_schema(self, exclude_system_tables=True, use_cache=False): """ Pulls your database's schema again and looks for any new tables and columns. """ col_meta, table_meta = self._get_db_metadata(exclude_system_tables, use_cache) tables = self._gen_tables_from_col_tuples(col_meta) # Three modes for refreshing schema # 1. load directly from cache # 2. use a single query for getting all key relationships # 3. use the naive approach if use_cache: # generate our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, table_meta[t]['schema'], t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_meta[t]['foreign_keys']['columns'], ref_keys=table_meta[t]['ref_keys']['columns']) for t in sorted(tables.keys())]) # optimize the foreign/ref key query by doing it one time, database-wide, if query is available elif not use_cache and isinstance(self._query_templates.get('system', {}).get('foreign_keys_for_db', None), str): self.cur.execute(self._query_templates['system']['foreign_keys_for_db']) table_db_foreign_keys = defaultdict(list) for rel in self.cur: # second value in relationship tuple is the table name table_db_foreign_keys[rel[1]].append(rel) self.cur.execute(self._query_templates['system']['ref_keys_for_db']) table_db_ref_keys = defaultdict(list) for rel in self.cur: # second value in relationship tuple is the table name table_db_ref_keys[rel[1]].append(rel) # generate our Tables, and load them into a TableSet self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, t, tables[t], keys_per_column=self.keys_per_column, foreign_keys=table_db_foreign_keys[t], ref_keys=table_db_ref_keys[t]) for t in sorted(tables.keys())]) elif not use_cache: self._tables = TableSet([Table(self.con, self._query_templates, tables[t][0].schema, 
t, tables[t], keys_per_column=self.keys_per_column) for t in sorted(tables.keys())]) sys.stderr.write("done!\n")
def insert(self, s):
    '''
    Insert string @s at the current cursor location.
    '''
    position = self.cursor_loc
    for ch in s:
        self.text.insert(position, ch)
        position += 1
    # Cursor ends up just after the inserted text.
    self.cursor_loc = position
Insert string @s at the current cursor location.
Below is the the instruction that describes the task: ### Input: Insert string @s at the current cursor location. ### Response: def insert(self, s): ''' Insert string @s at the current cursor location. ''' for c in s: self.text.insert(self.cursor_loc, c) self.cursor_loc += 1
def enum(name, *members, **withvalue):
    """Class builder for :class:`enum.Enum` subclasses.

    Members may be given as separate arguments, a single space-separated
    string, or a single list/tuple; each such member's value equals its
    name.  Keyword arguments add members with explicit values.
    """
    if len(members) == 1:
        if isinstance(members[0], str):
            members = members[0].split()
        elif isinstance(members[0], (list, tuple)):
            members = members[0]
    dic = {v: v for v in members}
    dic.update(withvalue)
    # Use the Enum functional API instead of calling type() directly:
    # EnumMeta expects its special _EnumDict class dict, so
    # `type(name, (Enum,), dic)` fails on modern Python versions.
    return Enum(name, dic)
class builder
Below is the the instruction that describes the task: ### Input: class buider ### Response: def enum(name, *members, **withvalue): """class buider""" if len(members) == 1: if isinstance(members[0], str): members = members[0].split() elif isinstance(members[0], (list, tuple)): members = members[0] dic = {v: v for v in members} dic.update(withvalue) return type(name, (Enum,), dic)
def host_is_trusted(hostname, trusted_list):
    """Checks if a host is trusted against a list.  This also takes care
    of port normalization.

    .. versionadded:: 0.9

    :param hostname: the hostname to check
    :param trusted_list: a list of hostnames to check against.  If a
                         hostname starts with a dot it will match against
                         all subdomains as well.
    """
    if not hostname:
        return False
    if isinstance(trusted_list, string_types):
        trusted_list = [trusted_list]

    def _normalize(name):
        # Drop an optional ":port" suffix, then IDNA-encode.
        if ':' in name:
            name = name.rsplit(':', 1)[0]
        return _encode_idna(name)

    hostname = _normalize(hostname)
    for entry in trusted_list:
        # A leading dot means "this domain and all of its subdomains".
        suffix_match = entry.startswith('.')
        if suffix_match:
            entry = entry[1:]
        entry = _normalize(entry)
        if entry == hostname:
            return True
        if suffix_match and hostname.endswith('.' + entry):
            return True
    return False
Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well.
Below is the the instruction that describes the task: ### Input: Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. ### Response: def host_is_trusted(hostname, trusted_list): """Checks if a host is trusted against a list. This also takes care of port normalization. .. versionadded:: 0.9 :param hostname: the hostname to check :param trusted_list: a list of hostnames to check against. If a hostname starts with a dot it will match against all subdomains as well. """ if not hostname: return False if isinstance(trusted_list, string_types): trusted_list = [trusted_list] def _normalize(hostname): if ':' in hostname: hostname = hostname.rsplit(':', 1)[0] return _encode_idna(hostname) hostname = _normalize(hostname) for ref in trusted_list: if ref.startswith('.'): ref = ref[1:] suffix_match = True else: suffix_match = False ref = _normalize(ref) if ref == hostname: return True if suffix_match and hostname.endswith('.' + ref): return True return False
def fill_main_goids(go2obj, goids):
    """Ensure main GO IDs are included in go2obj."""
    # Users may supply alternate GO IDs; make sure each term's main GO ID
    # also maps to the same GO term object.
    for usr_goid in goids:
        goterm = go2obj[usr_goid]
        main_goid = goterm.id
        if main_goid != usr_goid and main_goid not in go2obj:
            go2obj[main_goid] = goterm
Ensure main GO IDs are included in go2obj.
Below is the the instruction that describes the task: ### Input: Ensure main GO IDs are included in go2obj. ### Response: def fill_main_goids(go2obj, goids): """Ensure main GO IDs are included in go2obj.""" # User GO IDs (goids) may be either main GO IDs or alternate GO IDs. for goid in goids: goobj = go2obj[goid] # If a user specified an ALT GO ID and main GO ID not in go2obj: if goid != goobj.id and goobj.id not in go2obj: # Add main GO ID to go2obj go2obj[goobj.id] = goobj
def get_json_ident(request_headers: dict) -> int:
    """
    Defines whether the JSON response will be indented or not

    Browsers (user agents starting with "Mozilla") receive indented JSON
    for readability; all other clients get compact output.

    :param request_headers: dict
    :return: indent width (2 for browsers, 0 otherwise)
    """
    user_agent = request_headers.get('HTTP_USER_AGENT', '')
    # The old regex "[Mozilla]{7}" was a character class matching ANY
    # seven characters drawn from {M, o, z, i, l, a} (e.g. "zilloaM");
    # the intent is a literal "Mozilla" prefix check.
    return 2 if user_agent.startswith('Mozilla') else 0
Defines whether the JSON response will be indented or not :param request_headers: dict :return: self
Below is the the instruction that describes the task: ### Input: Defines whether the JSON response will be indented or not :param request_headers: dict :return: self ### Response: def get_json_ident(request_headers: dict) -> int: """ Defines whether the JSON response will be indented or not :param request_headers: dict :return: self """ if 'HTTP_USER_AGENT' in request_headers: indent = 2 if re.match("[Mozilla]{7}", request_headers['HTTP_USER_AGENT']) else 0 else: indent = 0 return indent
def accept(self, value):
    """
    Add an Accept option to a request.

    :param value: the Accept value
    """
    # Silently ignore values that are not known content-format codes.
    if value not in list(defines.Content_types.values()):
        return
    opt = Option()
    opt.number = defines.OptionRegistry.ACCEPT.number
    opt.value = value
    self.add_option(opt)
Add an Accept option to a request. :param value: the Accept value
Below is the the instruction that describes the task: ### Input: Add an Accept option to a request. :param value: the Accept value ### Response: def accept(self, value): """ Add an Accept option to a request. :param value: the Accept value """ if value in list(defines.Content_types.values()): option = Option() option.number = defines.OptionRegistry.ACCEPT.number option.value = value self.add_option(option)
def verify(self, **kwargs):
    """ Verify a message with an account's memo key

        :param str account: (optional) the account that owns the bet
            (defaults to ``default_account``)
        :returns: True if the message is verified successfully
        :raises InvalidMessageSignature if the signature is not ok

        On success this also populates ``self.signed_by_account``,
        ``self.signed_by_name``, ``self.meta`` and ``self.plain_message``.
    """
    # NOTE(review): input validation below uses `assert`, which is
    # stripped under `python -O` -- confirm that is acceptable here.

    # Split message into its parts (header / meta / signature sections)
    parts = re.split("|".join(self.MESSAGE_SPLIT), self.message)
    parts = [x for x in parts if x.strip()]

    assert len(parts) > 2, "Incorrect number of message parts"

    # Strip away all whitespaces before and after the message
    message = parts[0].strip()
    signature = parts[2].strip()
    # Parse the meta data (key=value lines in the middle section)
    meta = dict(re.findall(r"(\S+)=(.*)", parts[1]))

    log.info("Message is: {}".format(message))
    log.info("Meta is: {}".format(json.dumps(meta)))
    log.info("Signature is: {}".format(signature))

    # Ensure we have all the data in meta
    assert "account" in meta, "No 'account' could be found in meta data"
    assert "memokey" in meta, "No 'memokey' could be found in meta data"
    assert "block" in meta, "No 'block' could be found in meta data"
    assert "timestamp" in meta, "No 'timestamp' could be found in meta data"

    account_name = meta.get("account").strip()
    memo_key = meta["memokey"].strip()

    # Sanity-check that the embedded memo key parses as a public key.
    try:
        self.publickey_class(memo_key, prefix=self.blockchain.prefix)
    except Exception:
        raise InvalidMemoKeyException("The memo key in the message is invalid")

    # Load account from blockchain
    try:
        account = self.account_class(
            account_name, blockchain_instance=self.blockchain
        )
    except AccountDoesNotExistsException:
        raise AccountDoesNotExistsException(
            "Could not find account {}. Are you connected to the right chain?".format(
                account_name
            )
        )

    # Test if memo key is the same as on the blockchain
    if not account["options"]["memo_key"] == memo_key:
        raise WrongMemoKey(
            "Memo Key of account {} on the Blockchain ".format(account["name"])
            + "differs from memo key in the message: {} != {}".format(
                account["options"]["memo_key"], memo_key
            )
        )

    # Reformat message into the canonical signed form. Note: the
    # template is filled from locals(), so it may reference any local
    # defined above (message, meta, account_name, ...).
    enc_message = self.SIGNED_MESSAGE_META.format(**locals())

    # Verify Signature: recover the public key from the signature.
    pubkey = verify_message(enc_message, unhexlify(signature))

    # Verify pubkey: the recovered key must equal the account's memo key.
    pk = self.publickey_class(
        hexlify(pubkey).decode("ascii"), prefix=self.blockchain.prefix
    )

    if format(pk, self.blockchain.prefix) != memo_key:
        raise InvalidMessageSignature("The signature doesn't match the memo key")

    self.signed_by_account = account
    self.signed_by_name = account["name"]
    self.meta = meta
    self.plain_message = message

    return True
Verify a message with an account's memo key :param str account: (optional) the account that owns the bet (defaults to ``default_account``) :returns: True if the message is verified successfully :raises InvalidMessageSignature if the signature is not ok
Below is the the instruction that describes the task: ### Input: Verify a message with an account's memo key :param str account: (optional) the account that owns the bet (defaults to ``default_account``) :returns: True if the message is verified successfully :raises InvalidMessageSignature if the signature is not ok ### Response: def verify(self, **kwargs): """ Verify a message with an account's memo key :param str account: (optional) the account that owns the bet (defaults to ``default_account``) :returns: True if the message is verified successfully :raises InvalidMessageSignature if the signature is not ok """ # Split message into its parts parts = re.split("|".join(self.MESSAGE_SPLIT), self.message) parts = [x for x in parts if x.strip()] assert len(parts) > 2, "Incorrect number of message parts" # Strip away all whitespaces before and after the message message = parts[0].strip() signature = parts[2].strip() # Parse the meta data meta = dict(re.findall(r"(\S+)=(.*)", parts[1])) log.info("Message is: {}".format(message)) log.info("Meta is: {}".format(json.dumps(meta))) log.info("Signature is: {}".format(signature)) # Ensure we have all the data in meta assert "account" in meta, "No 'account' could be found in meta data" assert "memokey" in meta, "No 'memokey' could be found in meta data" assert "block" in meta, "No 'block' could be found in meta data" assert "timestamp" in meta, "No 'timestamp' could be found in meta data" account_name = meta.get("account").strip() memo_key = meta["memokey"].strip() try: self.publickey_class(memo_key, prefix=self.blockchain.prefix) except Exception: raise InvalidMemoKeyException("The memo key in the message is invalid") # Load account from blockchain try: account = self.account_class( account_name, blockchain_instance=self.blockchain ) except AccountDoesNotExistsException: raise AccountDoesNotExistsException( "Could not find account {}. 
Are you connected to the right chain?".format( account_name ) ) # Test if memo key is the same as on the blockchain if not account["options"]["memo_key"] == memo_key: raise WrongMemoKey( "Memo Key of account {} on the Blockchain ".format(account["name"]) + "differs from memo key in the message: {} != {}".format( account["options"]["memo_key"], memo_key ) ) # Reformat message enc_message = self.SIGNED_MESSAGE_META.format(**locals()) # Verify Signature pubkey = verify_message(enc_message, unhexlify(signature)) # Verify pubky pk = self.publickey_class( hexlify(pubkey).decode("ascii"), prefix=self.blockchain.prefix ) if format(pk, self.blockchain.prefix) != memo_key: raise InvalidMessageSignature("The signature doesn't match the memo key") self.signed_by_account = account self.signed_by_name = account["name"] self.meta = meta self.plain_message = message return True
def Deserialize(self, reader: BinaryReader):
    """
    Deserialize full object.

    Args:
        reader (neocore.IO.BinaryReader): stream positioned at the start
            of a serialized ValidatorState.
    """
    # Reads are sequential and order-sensitive: base state first, then
    # the secp256r1 public key, the registration flag and the vote total.
    super(ValidatorState, self).Deserialize(reader)
    self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)
    self.Registered = reader.ReadBool()
    self.Votes = reader.ReadFixed8()
Deserialize full object. Args: reader (neocore.IO.BinaryReader):
Below is the the instruction that describes the task: ### Input: Deserialize full object. Args: reader (neocore.IO.BinaryReader): ### Response: def Deserialize(self, reader: BinaryReader): """ Deserialize full object. Args: reader (neocore.IO.BinaryReader): """ super(ValidatorState, self).Deserialize(reader) self.PublicKey = ECDSA.Deserialize_Secp256r1(reader) self.Registered = reader.ReadBool() self.Votes = reader.ReadFixed8()
def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string""" indented_lines = [] indent_spaces = ' ' * indent_level for line in string.split('\n'): indented_lines.append(indent_spaces + line) return '\n'.join(indented_lines)
Indent every line of text in a newline-delimited string
Below is the the instruction that describes the task: ### Input: Indent every line of text in a newline-delimited string ### Response: def indent_text(string, indent_level=2): """Indent every line of text in a newline-delimited string""" indented_lines = [] indent_spaces = ' ' * indent_level for line in string.split('\n'): indented_lines.append(indent_spaces + line) return '\n'.join(indented_lines)
def perform_permissions_check(self, user, obj, perms): """ Performs the permissions check. """ return self.request.forum_permission_handler.can_delete_topics(obj, user)
Performs the permissions check.
Below is the the instruction that describes the task: ### Input: Performs the permissions check. ### Response: def perform_permissions_check(self, user, obj, perms): """ Performs the permissions check. """ return self.request.forum_permission_handler.can_delete_topics(obj, user)
async def handle_player_update(self, state: "node.PlayerState"): """ Handles player updates from lavalink. Parameters ---------- state : websocket.PlayerState """ if state.position > self.position: self._is_playing = True self.position = state.position
Handles player updates from lavalink. Parameters ---------- state : websocket.PlayerState
Below is the the instruction that describes the task: ### Input: Handles player updates from lavalink. Parameters ---------- state : websocket.PlayerState ### Response: async def handle_player_update(self, state: "node.PlayerState"): """ Handles player updates from lavalink. Parameters ---------- state : websocket.PlayerState """ if state.position > self.position: self._is_playing = True self.position = state.position
def _scatter_obs( adata, x=None, y=None, color=None, use_raw=None, layers='X', sort_order=True, alpha=None, basis=None, groups=None, components=None, projection='2d', legend_loc='right margin', legend_fontsize=None, legend_fontweight=None, color_map=None, palette=None, frameon=None, right_margin=None, left_margin=None, size=None, title=None, show=None, save=None, ax=None): """See docstring of scatter.""" sanitize_anndata(adata) from scipy.sparse import issparse if use_raw is None and adata.raw is not None: use_raw = True # process layers if layers is None: layers = 'X' if isinstance(layers, str) and (layers == 'X' or layers in adata.layers.keys()): layers = (layers, layers, layers) elif isinstance(layers, (tuple, list)) and len(layers) == 3: for layer in layers: if layer not in adata.layers.keys() and layer != 'X': raise ValueError( '`layers` should have elements that are either \'X\' or in adata.layers.keys().') else: raise ValueError('`layers` should be a string or a list/tuple of length 3.') if use_raw and (layers != ('X', 'X', 'X') or layers != ['X', 'X', 'X']): ValueError('`use_raw` must be `False` if layers other than \'X\' are used.') if legend_loc not in VALID_LEGENDLOCS: raise ValueError( 'Invalid `legend_loc`, need to be one of: {}.'.format(VALID_LEGENDLOCS)) if components is None: components = '1,2' if '2d' in projection else '1,2,3' if isinstance(components, str): components = components.split(',') components = np.array(components).astype(int) - 1 keys = ['grey'] if color is None else [color] if isinstance(color, str) else color if title is not None and isinstance(title, str): title = [title] highlights = adata.uns['highlights'] if 'highlights' in adata.uns else [] if basis is not None: try: # ignore the '0th' diffusion component if basis == 'diffmap': components += 1 Y = adata.obsm['X_' + basis][:, components] # correct the component vector for use in labeling etc. 
if basis == 'diffmap': components -= 1 except KeyError: raise KeyError('compute coordinates using visualization tool {} first' .format(basis)) elif x is not None and y is not None: x_arr = adata._get_obs_array(x, use_raw=use_raw, layer=layers[0]) y_arr = adata._get_obs_array(y, use_raw=use_raw, layer=layers[1]) x_arr = x_arr.toarray().flatten() if issparse(x_arr) else x_arr y_arr = y_arr.toarray().flatten() if issparse(y_arr) else y_arr Y = np.c_[x_arr[:, None], y_arr[:, None]] else: raise ValueError('Either provide a `basis` or `x` and `y`.') if size is None: n = Y.shape[0] size = 120000 / n if legend_loc.startswith('on data') and legend_fontsize is None: legend_fontsize = rcParams['legend.fontsize'] elif legend_fontsize is None: legend_fontsize = rcParams['legend.fontsize'] palette_was_none = False if palette is None: palette_was_none = True if isinstance(palette, list): if not is_color_like(palette[0]): palettes = palette else: palettes = [palette] else: palettes = [palette for i in range(len(keys))] for i, palette in enumerate(palettes): palettes[i] = utils.default_palette(palette) if basis is not None: component_name = ( 'DC' if basis == 'diffmap' else 'tSNE' if basis == 'tsne' else 'UMAP' if basis == 'umap' else 'PC' if basis == 'pca' else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis else basis) else: component_name = None axis_labels = (x, y) if component_name is None else None show_ticks = True if component_name is None else False # generate the colors color_ids = [] categoricals = [] colorbars = [] for ikey, key in enumerate(keys): c = 'white' categorical = False # by default, assume continuous or flat color colorbar = None # test whether we have categorial or continuous annotation if key in adata.obs_keys(): if is_categorical_dtype(adata.obs[key]): categorical = True else: c = adata.obs[key] # coloring according to gene expression elif (use_raw and adata.raw is not None and key in adata.raw.var_names): c = adata.raw[:, key].X elif key 
in adata.var_names: c = adata[:, key].X if layers[2] == 'X' else adata[:, key].layers[layers[2]] c = c.toarray().flatten() if issparse(c) else c elif is_color_like(key): # a flat color c = key colorbar = False else: raise ValueError( 'key \'{}\' is invalid! pass valid observation annotation, ' 'one of {} or a gene name {}' .format(key, adata.obs_keys(), adata.var_names)) if colorbar is None: colorbar = not categorical colorbars.append(colorbar) if categorical: categoricals.append(ikey) color_ids.append(c) if right_margin is None and len(categoricals) > 0: if legend_loc == 'right margin': right_margin = 0.5 if title is None and keys[0] is not None: title = [key.replace('_', ' ') if not is_color_like(key) else '' for key in keys] axs = scatter_base(Y, title=title, alpha=alpha, component_name=component_name, axis_labels=axis_labels, component_indexnames=components + 1, projection=projection, colors=color_ids, highlights=highlights, colorbars=colorbars, right_margin=right_margin, left_margin=left_margin, sizes=[size for c in keys], color_map=color_map, show_ticks=show_ticks, ax=ax) def add_centroid(centroids, name, Y, mask): Y_mask = Y[mask] if Y_mask.shape[0] == 0: return median = np.median(Y_mask, axis=0) i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1)) centroids[name] = Y_mask[i] # loop over all categorical annotation and plot it for i, ikey in enumerate(categoricals): palette = palettes[i] key = keys[ikey] utils.add_colors_for_categorical_sample_annotation( adata, key, palette, force_update_colors=not palette_was_none) # actually plot the groups mask_remaining = np.ones(Y.shape[0], dtype=bool) centroids = {} if groups is None: for iname, name in enumerate(adata.obs[key].cat.categories): if name not in settings.categories_to_ignore: mask = scatter_group(axs[ikey], key, iname, adata, Y, projection, size=size, alpha=alpha) mask_remaining[mask] = False if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask) else: groups = [groups] if 
isinstance(groups, str) else groups for name in groups: if name not in set(adata.obs[key].cat.categories): raise ValueError('"' + name + '" is invalid!' + ' specify valid name, one of ' + str(adata.obs[key].cat.categories)) else: iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0] mask = scatter_group(axs[ikey], key, iname, adata, Y, projection, size=size, alpha=alpha) if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask) mask_remaining[mask] = False if mask_remaining.sum() > 0: data = [Y[mask_remaining, 0], Y[mask_remaining, 1]] if projection == '3d': data.append(Y[mask_remaining, 2]) axs[ikey].scatter(*data, marker='.', c='lightgrey', s=size, edgecolors='none', zorder=-1) legend = None if legend_loc.startswith('on data'): if legend_fontweight is None: legend_fontweight = 'bold' for name, pos in centroids.items(): axs[ikey].text(pos[0], pos[1], name, weight=legend_fontweight, verticalalignment='center', horizontalalignment='center', fontsize=legend_fontsize) all_pos = np.zeros((len(adata.obs[key].cat.categories), 2)) for iname, name in enumerate(adata.obs[key].cat.categories): if name in centroids: all_pos[iname] = centroids[name] else: all_pos[iname] = [np.nan, np.nan] utils._tmp_cluster_pos = all_pos if legend_loc == 'on data export': filename = settings.writedir + 'pos.csv' logg.msg('exporting label positions to {}'.format(filename), v=1) if settings.writedir != '' and not os.path.exists(settings.writedir): os.makedirs(settings.writedir) np.savetxt(filename, all_pos, delimiter=',') elif legend_loc == 'right margin': legend = axs[ikey].legend( frameon=False, loc='center left', bbox_to_anchor=(1, 0.5), ncol=(1 if len(adata.obs[key].cat.categories) <= 14 else 2 if len(adata.obs[key].cat.categories) <= 30 else 3), fontsize=legend_fontsize) elif legend_loc != 'none': legend = axs[ikey].legend( frameon=False, loc=legend_loc, fontsize=legend_fontsize) if legend is not None: for handle in legend.legendHandles: 
handle.set_sizes([300.0]) # draw a frame around the scatter frameon = settings._frameon if frameon is None else frameon if not frameon and x is None and y is None: for ax in axs: ax.set_xlabel('') ax.set_ylabel('') ax.set_frame_on(False) utils.savefig_or_show('scatter' if basis is None else basis, show=show, save=save) if show == False: return axs if len(keys) > 1 else axs[0]
See docstring of scatter.
Below is the the instruction that describes the task: ### Input: See docstring of scatter. ### Response: def _scatter_obs( adata, x=None, y=None, color=None, use_raw=None, layers='X', sort_order=True, alpha=None, basis=None, groups=None, components=None, projection='2d', legend_loc='right margin', legend_fontsize=None, legend_fontweight=None, color_map=None, palette=None, frameon=None, right_margin=None, left_margin=None, size=None, title=None, show=None, save=None, ax=None): """See docstring of scatter.""" sanitize_anndata(adata) from scipy.sparse import issparse if use_raw is None and adata.raw is not None: use_raw = True # process layers if layers is None: layers = 'X' if isinstance(layers, str) and (layers == 'X' or layers in adata.layers.keys()): layers = (layers, layers, layers) elif isinstance(layers, (tuple, list)) and len(layers) == 3: for layer in layers: if layer not in adata.layers.keys() and layer != 'X': raise ValueError( '`layers` should have elements that are either \'X\' or in adata.layers.keys().') else: raise ValueError('`layers` should be a string or a list/tuple of length 3.') if use_raw and (layers != ('X', 'X', 'X') or layers != ['X', 'X', 'X']): ValueError('`use_raw` must be `False` if layers other than \'X\' are used.') if legend_loc not in VALID_LEGENDLOCS: raise ValueError( 'Invalid `legend_loc`, need to be one of: {}.'.format(VALID_LEGENDLOCS)) if components is None: components = '1,2' if '2d' in projection else '1,2,3' if isinstance(components, str): components = components.split(',') components = np.array(components).astype(int) - 1 keys = ['grey'] if color is None else [color] if isinstance(color, str) else color if title is not None and isinstance(title, str): title = [title] highlights = adata.uns['highlights'] if 'highlights' in adata.uns else [] if basis is not None: try: # ignore the '0th' diffusion component if basis == 'diffmap': components += 1 Y = adata.obsm['X_' + basis][:, components] # correct the component vector for use 
in labeling etc. if basis == 'diffmap': components -= 1 except KeyError: raise KeyError('compute coordinates using visualization tool {} first' .format(basis)) elif x is not None and y is not None: x_arr = adata._get_obs_array(x, use_raw=use_raw, layer=layers[0]) y_arr = adata._get_obs_array(y, use_raw=use_raw, layer=layers[1]) x_arr = x_arr.toarray().flatten() if issparse(x_arr) else x_arr y_arr = y_arr.toarray().flatten() if issparse(y_arr) else y_arr Y = np.c_[x_arr[:, None], y_arr[:, None]] else: raise ValueError('Either provide a `basis` or `x` and `y`.') if size is None: n = Y.shape[0] size = 120000 / n if legend_loc.startswith('on data') and legend_fontsize is None: legend_fontsize = rcParams['legend.fontsize'] elif legend_fontsize is None: legend_fontsize = rcParams['legend.fontsize'] palette_was_none = False if palette is None: palette_was_none = True if isinstance(palette, list): if not is_color_like(palette[0]): palettes = palette else: palettes = [palette] else: palettes = [palette for i in range(len(keys))] for i, palette in enumerate(palettes): palettes[i] = utils.default_palette(palette) if basis is not None: component_name = ( 'DC' if basis == 'diffmap' else 'tSNE' if basis == 'tsne' else 'UMAP' if basis == 'umap' else 'PC' if basis == 'pca' else basis.replace('draw_graph_', '').upper() if 'draw_graph' in basis else basis) else: component_name = None axis_labels = (x, y) if component_name is None else None show_ticks = True if component_name is None else False # generate the colors color_ids = [] categoricals = [] colorbars = [] for ikey, key in enumerate(keys): c = 'white' categorical = False # by default, assume continuous or flat color colorbar = None # test whether we have categorial or continuous annotation if key in adata.obs_keys(): if is_categorical_dtype(adata.obs[key]): categorical = True else: c = adata.obs[key] # coloring according to gene expression elif (use_raw and adata.raw is not None and key in adata.raw.var_names): c = 
adata.raw[:, key].X elif key in adata.var_names: c = adata[:, key].X if layers[2] == 'X' else adata[:, key].layers[layers[2]] c = c.toarray().flatten() if issparse(c) else c elif is_color_like(key): # a flat color c = key colorbar = False else: raise ValueError( 'key \'{}\' is invalid! pass valid observation annotation, ' 'one of {} or a gene name {}' .format(key, adata.obs_keys(), adata.var_names)) if colorbar is None: colorbar = not categorical colorbars.append(colorbar) if categorical: categoricals.append(ikey) color_ids.append(c) if right_margin is None and len(categoricals) > 0: if legend_loc == 'right margin': right_margin = 0.5 if title is None and keys[0] is not None: title = [key.replace('_', ' ') if not is_color_like(key) else '' for key in keys] axs = scatter_base(Y, title=title, alpha=alpha, component_name=component_name, axis_labels=axis_labels, component_indexnames=components + 1, projection=projection, colors=color_ids, highlights=highlights, colorbars=colorbars, right_margin=right_margin, left_margin=left_margin, sizes=[size for c in keys], color_map=color_map, show_ticks=show_ticks, ax=ax) def add_centroid(centroids, name, Y, mask): Y_mask = Y[mask] if Y_mask.shape[0] == 0: return median = np.median(Y_mask, axis=0) i = np.argmin(np.sum(np.abs(Y_mask - median), axis=1)) centroids[name] = Y_mask[i] # loop over all categorical annotation and plot it for i, ikey in enumerate(categoricals): palette = palettes[i] key = keys[ikey] utils.add_colors_for_categorical_sample_annotation( adata, key, palette, force_update_colors=not palette_was_none) # actually plot the groups mask_remaining = np.ones(Y.shape[0], dtype=bool) centroids = {} if groups is None: for iname, name in enumerate(adata.obs[key].cat.categories): if name not in settings.categories_to_ignore: mask = scatter_group(axs[ikey], key, iname, adata, Y, projection, size=size, alpha=alpha) mask_remaining[mask] = False if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask) else: 
groups = [groups] if isinstance(groups, str) else groups for name in groups: if name not in set(adata.obs[key].cat.categories): raise ValueError('"' + name + '" is invalid!' + ' specify valid name, one of ' + str(adata.obs[key].cat.categories)) else: iname = np.flatnonzero(adata.obs[key].cat.categories.values == name)[0] mask = scatter_group(axs[ikey], key, iname, adata, Y, projection, size=size, alpha=alpha) if legend_loc.startswith('on data'): add_centroid(centroids, name, Y, mask) mask_remaining[mask] = False if mask_remaining.sum() > 0: data = [Y[mask_remaining, 0], Y[mask_remaining, 1]] if projection == '3d': data.append(Y[mask_remaining, 2]) axs[ikey].scatter(*data, marker='.', c='lightgrey', s=size, edgecolors='none', zorder=-1) legend = None if legend_loc.startswith('on data'): if legend_fontweight is None: legend_fontweight = 'bold' for name, pos in centroids.items(): axs[ikey].text(pos[0], pos[1], name, weight=legend_fontweight, verticalalignment='center', horizontalalignment='center', fontsize=legend_fontsize) all_pos = np.zeros((len(adata.obs[key].cat.categories), 2)) for iname, name in enumerate(adata.obs[key].cat.categories): if name in centroids: all_pos[iname] = centroids[name] else: all_pos[iname] = [np.nan, np.nan] utils._tmp_cluster_pos = all_pos if legend_loc == 'on data export': filename = settings.writedir + 'pos.csv' logg.msg('exporting label positions to {}'.format(filename), v=1) if settings.writedir != '' and not os.path.exists(settings.writedir): os.makedirs(settings.writedir) np.savetxt(filename, all_pos, delimiter=',') elif legend_loc == 'right margin': legend = axs[ikey].legend( frameon=False, loc='center left', bbox_to_anchor=(1, 0.5), ncol=(1 if len(adata.obs[key].cat.categories) <= 14 else 2 if len(adata.obs[key].cat.categories) <= 30 else 3), fontsize=legend_fontsize) elif legend_loc != 'none': legend = axs[ikey].legend( frameon=False, loc=legend_loc, fontsize=legend_fontsize) if legend is not None: for handle in 
legend.legendHandles: handle.set_sizes([300.0]) # draw a frame around the scatter frameon = settings._frameon if frameon is None else frameon if not frameon and x is None and y is None: for ax in axs: ax.set_xlabel('') ax.set_ylabel('') ax.set_frame_on(False) utils.savefig_or_show('scatter' if basis is None else basis, show=show, save=save) if show == False: return axs if len(keys) > 1 else axs[0]
def is_hash256(s): """ Returns True if the considered string is a valid SHA256 hash. """ if not s or not isinstance(s, str): return False return re.match('^[0-9A-F]{64}$', s.strip(), re.IGNORECASE)
Returns True if the considered string is a valid SHA256 hash.
Below is the the instruction that describes the task: ### Input: Returns True if the considered string is a valid SHA256 hash. ### Response: def is_hash256(s): """ Returns True if the considered string is a valid SHA256 hash. """ if not s or not isinstance(s, str): return False return re.match('^[0-9A-F]{64}$', s.strip(), re.IGNORECASE)
def write_mates(self): '''Scan the current chromosome for matches to any of the reads stored in the read1s buffer''' if self.chrom is not None: U.debug("Dumping %i mates for contig %s" % ( len(self.read1s), self.chrom)) for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True): if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)): continue key = read.query_name, read.reference_name, read.reference_start if key in self.read1s: self.outfile.write(read) self.read1s.remove(key) U.debug("%i mates remaining" % len(self.read1s))
Scan the current chromosome for matches to any of the reads stored in the read1s buffer
Below is the the instruction that describes the task: ### Input: Scan the current chromosome for matches to any of the reads stored in the read1s buffer ### Response: def write_mates(self): '''Scan the current chromosome for matches to any of the reads stored in the read1s buffer''' if self.chrom is not None: U.debug("Dumping %i mates for contig %s" % ( len(self.read1s), self.chrom)) for read in self.infile.fetch(reference=self.chrom, multiple_iterators=True): if any((read.is_unmapped, read.mate_is_unmapped, read.is_read1)): continue key = read.query_name, read.reference_name, read.reference_start if key in self.read1s: self.outfile.write(read) self.read1s.remove(key) U.debug("%i mates remaining" % len(self.read1s))
async def _fetch_channel_sid(self): """Creates a new channel for receiving push data. Sending an empty forward channel request will create a new channel on the server. There's a separate API to get the gsessionid alone that Hangouts for Chrome uses, but if we don't send a gsessionid with this request, it will return a gsessionid as well as the SID. Raises hangups.NetworkError if the channel can not be created. """ logger.info('Requesting new gsessionid and SID...') # Set SID and gsessionid to None so they aren't sent in by send_maps. self._sid_param = None self._gsessionid_param = None res = await self.send_maps([]) self._sid_param, self._gsessionid_param = _parse_sid_response(res.body) logger.info('New SID: {}'.format(self._sid_param)) logger.info('New gsessionid: {}'.format(self._gsessionid_param))
Creates a new channel for receiving push data. Sending an empty forward channel request will create a new channel on the server. There's a separate API to get the gsessionid alone that Hangouts for Chrome uses, but if we don't send a gsessionid with this request, it will return a gsessionid as well as the SID. Raises hangups.NetworkError if the channel can not be created.
Below is the the instruction that describes the task: ### Input: Creates a new channel for receiving push data. Sending an empty forward channel request will create a new channel on the server. There's a separate API to get the gsessionid alone that Hangouts for Chrome uses, but if we don't send a gsessionid with this request, it will return a gsessionid as well as the SID. Raises hangups.NetworkError if the channel can not be created. ### Response: async def _fetch_channel_sid(self): """Creates a new channel for receiving push data. Sending an empty forward channel request will create a new channel on the server. There's a separate API to get the gsessionid alone that Hangouts for Chrome uses, but if we don't send a gsessionid with this request, it will return a gsessionid as well as the SID. Raises hangups.NetworkError if the channel can not be created. """ logger.info('Requesting new gsessionid and SID...') # Set SID and gsessionid to None so they aren't sent in by send_maps. self._sid_param = None self._gsessionid_param = None res = await self.send_maps([]) self._sid_param, self._gsessionid_param = _parse_sid_response(res.body) logger.info('New SID: {}'.format(self._sid_param)) logger.info('New gsessionid: {}'.format(self._gsessionid_param))
def setup_py_source(self): # type: () -> Optional[str] """ Read setup.py to string :return: """ if not self.setup_source: self.setup_source = self._read_file("setup.py") if not self.setup_source: self.setup_source = self._read_file("setup") # rare case return self.setup_source
Read setup.py to string :return:
Below is the the instruction that describes the task: ### Input: Read setup.py to string :return: ### Response: def setup_py_source(self): # type: () -> Optional[str] """ Read setup.py to string :return: """ if not self.setup_source: self.setup_source = self._read_file("setup.py") if not self.setup_source: self.setup_source = self._read_file("setup") # rare case return self.setup_source
def run(self): """Begin serving. Returns the bound port, or 0 for domain socket.""" self._listening_sock, self._address = ( bind_domain_socket(self._address) if self._uds_path else bind_tcp_socket(self._address)) if self._ssl: certfile = os.path.join(os.path.dirname(__file__), 'server.pem') self._listening_sock = _ssl.wrap_socket( self._listening_sock, certfile=certfile, server_side=True) self._accept_thread = threading.Thread(target=self._accept_loop) self._accept_thread.daemon = True self._accept_thread.start() return self.port
Begin serving. Returns the bound port, or 0 for domain socket.
Below is the the instruction that describes the task: ### Input: Begin serving. Returns the bound port, or 0 for domain socket. ### Response: def run(self): """Begin serving. Returns the bound port, or 0 for domain socket.""" self._listening_sock, self._address = ( bind_domain_socket(self._address) if self._uds_path else bind_tcp_socket(self._address)) if self._ssl: certfile = os.path.join(os.path.dirname(__file__), 'server.pem') self._listening_sock = _ssl.wrap_socket( self._listening_sock, certfile=certfile, server_side=True) self._accept_thread = threading.Thread(target=self._accept_loop) self._accept_thread.daemon = True self._accept_thread.start() return self.port
def to_base64(self, skip=()): """ Construct from base64-encoded JSON. """ return base64.b64encode( ensure_bytes( self.to_json(skip=skip), encoding='utf-8', ) )
Construct from base64-encoded JSON.
Below is the the instruction that describes the task: ### Input: Construct from base64-encoded JSON. ### Response: def to_base64(self, skip=()): """ Construct from base64-encoded JSON. """ return base64.b64encode( ensure_bytes( self.to_json(skip=skip), encoding='utf-8', ) )
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'): """Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others. """ if len({len(c) for c in curves}) != 1: raise ValueError('All curves have to be of the same length.') datcmpargs = [] if alpha is not None: datcmpargs.append('--alpha=%f' % alpha) if adjust is not None: datcmpargs.append('--adjust=%s' % adjust) if test is not None: datcmpargs.append('--test=%s' % test) with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td: for i, c in enumerate(curves): mat = np.zeros((len(c), 3)) mat[:, 0] = c.q mat[:, 1] = c.Intensity mat[:, 2] = c.Error np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat) matC = np.zeros((len(curves), len(curves))) + np.nan matp = np.zeros((len(curves), len(curves))) + np.nan matpadj = np.zeros((len(curves), len(curves))) + np.nan ok = np.zeros(len(curves)) + np.nan try: results = subprocess.check_output( ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode( 'utf-8') except subprocess.CalledProcessError: pass else: for l in results.split('\n'): m = re.match( '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$', l) if m is not None: i = int(m.group('i')) - 1 j = int(m.group('j')) - 1 matC[i, j] = matC[j, i] = float(m.group('C')) matp[i, j] = matp[j, i] = float(m.group('p')) matpadj[i, j] = matpadj[j, i] = float(m.group('adjp')) else: m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l) if m is not None: ok[int(m.group('i')) - 1] = 
(m.group('ack') == '*') return matC, matp, matpadj, ok
Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others.
Below is the the instruction that describes the task: ### Input: Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others. ### Response: def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'): """Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others. 
""" if len({len(c) for c in curves}) != 1: raise ValueError('All curves have to be of the same length.') datcmpargs = [] if alpha is not None: datcmpargs.append('--alpha=%f' % alpha) if adjust is not None: datcmpargs.append('--adjust=%s' % adjust) if test is not None: datcmpargs.append('--test=%s' % test) with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td: for i, c in enumerate(curves): mat = np.zeros((len(c), 3)) mat[:, 0] = c.q mat[:, 1] = c.Intensity mat[:, 2] = c.Error np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat) matC = np.zeros((len(curves), len(curves))) + np.nan matp = np.zeros((len(curves), len(curves))) + np.nan matpadj = np.zeros((len(curves), len(curves))) + np.nan ok = np.zeros(len(curves)) + np.nan try: results = subprocess.check_output( ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode( 'utf-8') except subprocess.CalledProcessError: pass else: for l in results.split('\n'): m = re.match( '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$', l) if m is not None: i = int(m.group('i')) - 1 j = int(m.group('j')) - 1 matC[i, j] = matC[j, i] = float(m.group('C')) matp[i, j] = matp[j, i] = float(m.group('p')) matpadj[i, j] = matpadj[j, i] = float(m.group('adjp')) else: m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l) if m is not None: ok[int(m.group('i')) - 1] = (m.group('ack') == '*') return matC, matp, matpadj, ok
def get_files(self, common_name): """Return a bundle of TLS files associated with a common name""" record = self.get_record(common_name) return TLSFileBundle(common_name).from_record(record)
Return a bundle of TLS files associated with a common name
Below is the the instruction that describes the task: ### Input: Return a bundle of TLS files associated with a common name ### Response: def get_files(self, common_name): """Return a bundle of TLS files associated with a common name""" record = self.get_record(common_name) return TLSFileBundle(common_name).from_record(record)
def stage_default_config_file(self): """auto generate default config file, and stage it into the profile.""" s = self.generate_config_file() fname = os.path.join(self.profile_dir.location, self.config_file_name) if self.overwrite or not os.path.exists(fname): self.log.warn("Generating default config file: %r"%(fname)) with open(fname, 'w') as f: f.write(s)
auto generate default config file, and stage it into the profile.
Below is the the instruction that describes the task: ### Input: auto generate default config file, and stage it into the profile. ### Response: def stage_default_config_file(self): """auto generate default config file, and stage it into the profile.""" s = self.generate_config_file() fname = os.path.join(self.profile_dir.location, self.config_file_name) if self.overwrite or not os.path.exists(fname): self.log.warn("Generating default config file: %r"%(fname)) with open(fname, 'w') as f: f.write(s)
def raw(prompt, *args, **kwargs): """Calls input to allow user to input an arbitrary string. User can go back by entering the `go_back` string. Works in both Python 2 and 3. """ go_back = kwargs.get('go_back', '<') type_ = kwargs.get('type', str) default = kwargs.get('default', '') with stdout_redirected(sys.stderr): while True: try: if kwargs.get('secret', False): answer = getpass.getpass(prompt) elif sys.version_info < (3, 0): answer = raw_input(prompt) else: answer = input(prompt) if not answer: answer = default if answer == go_back: raise QuestionnaireGoBack return type_(answer) except ValueError: eprint('\n`{}` is not a valid `{}`\n'.format(answer, type_))
Calls input to allow user to input an arbitrary string. User can go back by entering the `go_back` string. Works in both Python 2 and 3.
Below is the the instruction that describes the task: ### Input: Calls input to allow user to input an arbitrary string. User can go back by entering the `go_back` string. Works in both Python 2 and 3. ### Response: def raw(prompt, *args, **kwargs): """Calls input to allow user to input an arbitrary string. User can go back by entering the `go_back` string. Works in both Python 2 and 3. """ go_back = kwargs.get('go_back', '<') type_ = kwargs.get('type', str) default = kwargs.get('default', '') with stdout_redirected(sys.stderr): while True: try: if kwargs.get('secret', False): answer = getpass.getpass(prompt) elif sys.version_info < (3, 0): answer = raw_input(prompt) else: answer = input(prompt) if not answer: answer = default if answer == go_back: raise QuestionnaireGoBack return type_(answer) except ValueError: eprint('\n`{}` is not a valid `{}`\n'.format(answer, type_))
def setup_gui_analysis_done(self): """Helper method to setup gui if analysis is done.""" self.progress_bar.hide() self.lblAnalysisStatus.setText(tr('Analysis done.')) self.pbnReportWeb.show() self.pbnReportPDF.show() # self.pbnReportComposer.show() # Hide until it works again. self.pbnReportPDF.clicked.connect(self.print_map)
Helper method to setup gui if analysis is done.
Below is the the instruction that describes the task: ### Input: Helper method to setup gui if analysis is done. ### Response: def setup_gui_analysis_done(self): """Helper method to setup gui if analysis is done.""" self.progress_bar.hide() self.lblAnalysisStatus.setText(tr('Analysis done.')) self.pbnReportWeb.show() self.pbnReportPDF.show() # self.pbnReportComposer.show() # Hide until it works again. self.pbnReportPDF.clicked.connect(self.print_map)
def run(self): """ Main entry function. """ history = InMemoryHistory() self._load_file() while True: # (re)load the todo.txt file (only if it has been modified) try: user_input = prompt(u'topydo> ', history=history, completer=self.completer, complete_while_typing=False) user_input = shlex.split(user_input) except EOFError: sys.exit(0) except KeyboardInterrupt: continue except ValueError as verr: error('Error: ' + str(verr)) continue try: (subcommand, args) = get_subcommand(user_input) except ConfigError as ce: error('Error: ' + str(ce) + '. Check your aliases configuration') continue try: if self._execute(subcommand, args) != False: self._post_execute() except TypeError: print(GENERIC_HELP)
Main entry function.
Below is the the instruction that describes the task: ### Input: Main entry function. ### Response: def run(self): """ Main entry function. """ history = InMemoryHistory() self._load_file() while True: # (re)load the todo.txt file (only if it has been modified) try: user_input = prompt(u'topydo> ', history=history, completer=self.completer, complete_while_typing=False) user_input = shlex.split(user_input) except EOFError: sys.exit(0) except KeyboardInterrupt: continue except ValueError as verr: error('Error: ' + str(verr)) continue try: (subcommand, args) = get_subcommand(user_input) except ConfigError as ce: error('Error: ' + str(ce) + '. Check your aliases configuration') continue try: if self._execute(subcommand, args) != False: self._post_execute() except TypeError: print(GENERIC_HELP)
def get_cloud_init_mime(cloud_init): ''' Get a mime multipart encoded string from a cloud-init dict. Currently supports boothooks, scripts and cloud-config. CLI Example: .. code-block:: bash salt myminion boto.get_cloud_init_mime <cloud init> ''' if isinstance(cloud_init, six.string_types): cloud_init = salt.utils.json.loads(cloud_init) _cloud_init = email.mime.multipart.MIMEMultipart() if 'boothooks' in cloud_init: for script_name, script in six.iteritems(cloud_init['boothooks']): _script = email.mime.text.MIMEText(script, 'cloud-boothook') _cloud_init.attach(_script) if 'scripts' in cloud_init: for script_name, script in six.iteritems(cloud_init['scripts']): _script = email.mime.text.MIMEText(script, 'x-shellscript') _cloud_init.attach(_script) if 'cloud-config' in cloud_init: cloud_config = cloud_init['cloud-config'] _cloud_config = email.mime.text.MIMEText( salt.utils.yaml.safe_dump(cloud_config, default_flow_style=False), 'cloud-config') _cloud_init.attach(_cloud_config) return _cloud_init.as_string()
Get a mime multipart encoded string from a cloud-init dict. Currently supports boothooks, scripts and cloud-config. CLI Example: .. code-block:: bash salt myminion boto.get_cloud_init_mime <cloud init>
Below is the the instruction that describes the task: ### Input: Get a mime multipart encoded string from a cloud-init dict. Currently supports boothooks, scripts and cloud-config. CLI Example: .. code-block:: bash salt myminion boto.get_cloud_init_mime <cloud init> ### Response: def get_cloud_init_mime(cloud_init): ''' Get a mime multipart encoded string from a cloud-init dict. Currently supports boothooks, scripts and cloud-config. CLI Example: .. code-block:: bash salt myminion boto.get_cloud_init_mime <cloud init> ''' if isinstance(cloud_init, six.string_types): cloud_init = salt.utils.json.loads(cloud_init) _cloud_init = email.mime.multipart.MIMEMultipart() if 'boothooks' in cloud_init: for script_name, script in six.iteritems(cloud_init['boothooks']): _script = email.mime.text.MIMEText(script, 'cloud-boothook') _cloud_init.attach(_script) if 'scripts' in cloud_init: for script_name, script in six.iteritems(cloud_init['scripts']): _script = email.mime.text.MIMEText(script, 'x-shellscript') _cloud_init.attach(_script) if 'cloud-config' in cloud_init: cloud_config = cloud_init['cloud-config'] _cloud_config = email.mime.text.MIMEText( salt.utils.yaml.safe_dump(cloud_config, default_flow_style=False), 'cloud-config') _cloud_init.attach(_cloud_config) return _cloud_init.as_string()
async def claimWork(self, *args, **kwargs): """ Claim Work Claim pending task(s) for the given `provisionerId`/`workerType` queue. If any work is available (even if fewer than the requested number of tasks, this will return immediately. Otherwise, it will block for tens of seconds waiting for work. If no work appears, it will return an emtpy list of tasks. Callers should sleep a short while (to avoid denial of service in an error condition) and call the endpoint again. This is a simple implementation of "long polling". This method takes input: ``v1/claim-work-request.json#`` This method gives output: ``v1/claim-work-response.json#`` This method is ``stable`` """ return await self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
Claim Work Claim pending task(s) for the given `provisionerId`/`workerType` queue. If any work is available (even if fewer than the requested number of tasks, this will return immediately. Otherwise, it will block for tens of seconds waiting for work. If no work appears, it will return an emtpy list of tasks. Callers should sleep a short while (to avoid denial of service in an error condition) and call the endpoint again. This is a simple implementation of "long polling". This method takes input: ``v1/claim-work-request.json#`` This method gives output: ``v1/claim-work-response.json#`` This method is ``stable``
Below is the the instruction that describes the task: ### Input: Claim Work Claim pending task(s) for the given `provisionerId`/`workerType` queue. If any work is available (even if fewer than the requested number of tasks, this will return immediately. Otherwise, it will block for tens of seconds waiting for work. If no work appears, it will return an emtpy list of tasks. Callers should sleep a short while (to avoid denial of service in an error condition) and call the endpoint again. This is a simple implementation of "long polling". This method takes input: ``v1/claim-work-request.json#`` This method gives output: ``v1/claim-work-response.json#`` This method is ``stable`` ### Response: async def claimWork(self, *args, **kwargs): """ Claim Work Claim pending task(s) for the given `provisionerId`/`workerType` queue. If any work is available (even if fewer than the requested number of tasks, this will return immediately. Otherwise, it will block for tens of seconds waiting for work. If no work appears, it will return an emtpy list of tasks. Callers should sleep a short while (to avoid denial of service in an error condition) and call the endpoint again. This is a simple implementation of "long polling". This method takes input: ``v1/claim-work-request.json#`` This method gives output: ``v1/claim-work-response.json#`` This method is ``stable`` """ return await self._makeApiCall(self.funcinfo["claimWork"], *args, **kwargs)
def shutdown(self): """Send SIGABRT to child processes to instruct them to stop""" self.signal_children(signal.SIGABRT) # Wait a few iterations when trying to stop children before terminating waiting = 0 while self.living_children: time.sleep(0.5) waiting += 1 if waiting == self.MAX_SHUTDOWN_WAIT: self.signal_children(signal.SIGKILL) break
Send SIGABRT to child processes to instruct them to stop
Below is the the instruction that describes the task: ### Input: Send SIGABRT to child processes to instruct them to stop ### Response: def shutdown(self): """Send SIGABRT to child processes to instruct them to stop""" self.signal_children(signal.SIGABRT) # Wait a few iterations when trying to stop children before terminating waiting = 0 while self.living_children: time.sleep(0.5) waiting += 1 if waiting == self.MAX_SHUTDOWN_WAIT: self.signal_children(signal.SIGKILL) break
def create_requests( requests: Union[Dict, List], *, context: Any = NOCONTEXT, convert_camel_case: bool ) -> Union[Request, Set[Request]]: """ Create a Request object from a dictionary (or list of them). Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them. """ if isinstance(requests, list): return { Request(context=context, convert_camel_case=convert_camel_case, **request) for request in requests } return Request(context=context, convert_camel_case=convert_camel_case, **requests)
Create a Request object from a dictionary (or list of them). Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them.
Below is the the instruction that describes the task: ### Input: Create a Request object from a dictionary (or list of them). Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them. ### Response: def create_requests( requests: Union[Dict, List], *, context: Any = NOCONTEXT, convert_camel_case: bool ) -> Union[Request, Set[Request]]: """ Create a Request object from a dictionary (or list of them). Args: requests: Request object, or a collection of them. methods: The list of methods that can be called. context: If specified, will be the first positional argument in all requests. convert_camel_case: Will convert the method name/any named params to snake case. Returns: A Request object, or a collection of them. """ if isinstance(requests, list): return { Request(context=context, convert_camel_case=convert_camel_case, **request) for request in requests } return Request(context=context, convert_camel_case=convert_camel_case, **requests)
def path_locations(home_dir): """Return the path locations for the environment (where libraries are, where scripts go, etc)""" # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its # prefix arg is broken: http://bugs.python.org/issue3386 if is_win: # Windows has lots of problems with executables with spaces in # the name; this function will remove them (using the ~1 # format): mkdir(home_dir) if ' ' in home_dir: import ctypes GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW size = max(len(home_dir)+1, 256) buf = ctypes.create_unicode_buffer(size) try: u = unicode except NameError: u = str ret = GetShortPathName(u(home_dir), buf, size) if not ret: print('Error: the path "%s" has a space in it' % home_dir) print('We could not determine the short pathname for it.') print('Exiting.') sys.exit(3) home_dir = str(buf.value) lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') if is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') elif is_pypy: lib_dir = home_dir inc_dir = join(home_dir, 'include') bin_dir = join(home_dir, 'bin') elif not is_win: lib_dir = join(home_dir, 'lib', py_version) multiarch_exec = '/usr/bin/multiarch-platform' if is_executable_file(multiarch_exec): # In Mageia (2) and Mandriva distros the include dir must be like: # virtualenv/include/multiarch-x86_64-linux/python2.7 # instead of being virtualenv/include/python2.7 p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() # stdout.strip is needed to remove newline character inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags) else: inc_dir = join(home_dir, 'include', py_version + abiflags) bin_dir = join(home_dir, 'bin') return home_dir, lib_dir, inc_dir, bin_dir
Return the path locations for the environment (where libraries are, where scripts go, etc)
Below is the the instruction that describes the task: ### Input: Return the path locations for the environment (where libraries are, where scripts go, etc) ### Response: def path_locations(home_dir): """Return the path locations for the environment (where libraries are, where scripts go, etc)""" # XXX: We'd use distutils.sysconfig.get_python_inc/lib but its # prefix arg is broken: http://bugs.python.org/issue3386 if is_win: # Windows has lots of problems with executables with spaces in # the name; this function will remove them (using the ~1 # format): mkdir(home_dir) if ' ' in home_dir: import ctypes GetShortPathName = ctypes.windll.kernel32.GetShortPathNameW size = max(len(home_dir)+1, 256) buf = ctypes.create_unicode_buffer(size) try: u = unicode except NameError: u = str ret = GetShortPathName(u(home_dir), buf, size) if not ret: print('Error: the path "%s" has a space in it' % home_dir) print('We could not determine the short pathname for it.') print('Exiting.') sys.exit(3) home_dir = str(buf.value) lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'Scripts') if is_jython: lib_dir = join(home_dir, 'Lib') inc_dir = join(home_dir, 'Include') bin_dir = join(home_dir, 'bin') elif is_pypy: lib_dir = home_dir inc_dir = join(home_dir, 'include') bin_dir = join(home_dir, 'bin') elif not is_win: lib_dir = join(home_dir, 'lib', py_version) multiarch_exec = '/usr/bin/multiarch-platform' if is_executable_file(multiarch_exec): # In Mageia (2) and Mandriva distros the include dir must be like: # virtualenv/include/multiarch-x86_64-linux/python2.7 # instead of being virtualenv/include/python2.7 p = subprocess.Popen(multiarch_exec, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdout, stderr = p.communicate() # stdout.strip is needed to remove newline character inc_dir = join(home_dir, 'include', stdout.strip(), py_version + abiflags) else: inc_dir = join(home_dir, 'include', py_version + abiflags) bin_dir = join(home_dir, 'bin') 
return home_dir, lib_dir, inc_dir, bin_dir
def canonical_ops(ops): ''' Returns the given operations array sorted with duplicates removed. @param ops checker.Ops @return: checker.Ops ''' new_ops = sorted(set(ops), key=lambda x: (x.entity, x.action)) return new_ops
Returns the given operations array sorted with duplicates removed. @param ops checker.Ops @return: checker.Ops
Below is the the instruction that describes the task: ### Input: Returns the given operations array sorted with duplicates removed. @param ops checker.Ops @return: checker.Ops ### Response: def canonical_ops(ops): ''' Returns the given operations array sorted with duplicates removed. @param ops checker.Ops @return: checker.Ops ''' new_ops = sorted(set(ops), key=lambda x: (x.entity, x.action)) return new_ops
def _find_nearest_cluster(self, point): """! @brief Find nearest cluster to the specified point. @param[in] point (list): Point from dataset. @return (uint, double) Index of nearest cluster and distance to it. """ index_cluster = -1; nearest_distance = float('inf'); for index in range(len(self._representatives)): distance = self._metric(point, self._representatives[index]); if distance < nearest_distance: index_cluster = index; nearest_distance = distance; return index_cluster, nearest_distance;
! @brief Find nearest cluster to the specified point. @param[in] point (list): Point from dataset. @return (uint, double) Index of nearest cluster and distance to it.
Below is the the instruction that describes the task: ### Input: ! @brief Find nearest cluster to the specified point. @param[in] point (list): Point from dataset. @return (uint, double) Index of nearest cluster and distance to it. ### Response: def _find_nearest_cluster(self, point): """! @brief Find nearest cluster to the specified point. @param[in] point (list): Point from dataset. @return (uint, double) Index of nearest cluster and distance to it. """ index_cluster = -1; nearest_distance = float('inf'); for index in range(len(self._representatives)): distance = self._metric(point, self._representatives[index]); if distance < nearest_distance: index_cluster = index; nearest_distance = distance; return index_cluster, nearest_distance;