Dataset columns: code (string, 75 to 104k chars), docstring (string, 1 to 46.9k chars), text (string, 164 to 112k chars).
def begin(self):
    """Begins a transaction.

    This method is called automatically when entering a with statement,
    however it can be called explicitly if you don't want to use a
    context manager.

    :raises: :class:`~exceptions.ValueError` if the transaction has
             already begun.
    """
    super(Transaction, self).begin()
    try:
        response_pb = self._client._datastore_api.begin_transaction(self.project)
        self._id = response_pb.transaction
    except:  # noqa: E722 do not use bare except, specify exception instead
        self._status = self._ABORTED
        raise
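For context, a short usage sketch. It mirrors the google-cloud-datastore style this method appears to come from; the client setup below is an assumption, not taken from the source:

from google.cloud import datastore  # assumed client library

client = datastore.Client()
xact = client.transaction()
xact.begin()  # explicit begin, instead of `with client.transaction():`
try:
    xact.put(datastore.Entity(key=client.key("Task", 1)))
    xact.commit()
except Exception:
    xact.rollback()
    raise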
def _build_job_dict(self):
    """Build a dictionary of `JobDetails` objects for the internal `Link`"""
    if self.args['dry_run']:
        status = JobStatus.unknown
    else:
        status = JobStatus.not_ready

    base_config = self.scatter_link.args
    for jobkey, job_config in sorted(self._job_configs.items()):
        full_job_config = base_config.copy()
        full_job_config.update(job_config)
        ScatterGather._make_scatter_logfile_name(jobkey, self.linkname, full_job_config)
        logfile = job_config.get('logfile')
        self._scatter_link._register_job(key=jobkey,
                                         job_config=full_job_config,
                                         logfile=logfile,
                                         status=status)
def handle(self):
    """Handles kick off request."""
    # Get and verify mr state.
    mr_id = self.request.get("mapreduce_id")
    # Log the mr_id since this is started in an unnamed task
    logging.info("Processing kickoff for job %s", mr_id)
    state = model.MapreduceState.get_by_job_id(mr_id)
    if not self._check_mr_state(state, mr_id):
        return

    # Create input readers.
    readers, serialized_readers_entity = self._get_input_readers(state)
    if readers is None:
        # We don't have any data. Finish map.
        logging.warning("Found no mapper input data to process.")
        state.active = False
        state.result_status = model.MapreduceState.RESULT_SUCCESS
        ControllerCallbackHandler._finalize_job(
            state.mapreduce_spec, state)
        return False

    # Create output writers.
    self._setup_output_writer(state)

    # Save states and make sure we use the saved input readers for
    # subsequent operations.
    result = self._save_states(state, serialized_readers_entity)
    if result is None:
        readers, _ = self._get_input_readers(state)
    elif not result:
        return

    queue_name = self.request.headers.get("X-AppEngine-QueueName")
    KickOffJobHandler._schedule_shards(state.mapreduce_spec, readers,
                                       queue_name,
                                       state.mapreduce_spec.params["base_path"],
                                       state)

    ControllerCallbackHandler.reschedule(
        state, state.mapreduce_spec, serial_id=0, queue_name=queue_name)
def get_phonon_frequencies(self):
    """calculate phonon frequencies"""
    # TODO: the following is most likely not correct or suboptimal
    # hence for demonstration purposes only
    frequencies = []
    for k, v0 in self.data.iteritems():
        for v1 in v0.itervalues():
            vec = map(abs, v1['dynmat'][k - 1])
            frequency = math.sqrt(sum(vec)) * 2. * \
                math.pi * 15.633302  # THz
            frequencies.append(frequency)
    return frequencies
def tokenize(self, untokenized_string: str, model=None):
    """Alias for tokenize_sentences()—NLTK's PlaintextCorpusReader
    needs a function called tokenize in functions used as a parameter
    for sentence tokenization.

    :type untokenized_string: str
    :param untokenized_string: A string containing one or more sentences.
    """
    if self.language in INDIAN_LANGUAGES:
        return self.indian_punctuation_tokenize_regex(untokenized_string)
    else:
        return self.tokenize_sentences(untokenized_string)
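A hypothetical driver, assuming the CLTK-style TokenizeSentence class this method appears to belong to (the class name and import path are assumptions):

from cltk.tokenize.sentence import TokenizeSentence  # assumed import path

tokenizer = TokenizeSentence('latin')
print(tokenizer.tokenize("Gallia est omnis divisa in partes tres. Quarum unam incolunt Belgae."))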
def Get(self):
    """Fetch client's data and return a proper Client object."""
    args = client_pb2.ApiGetClientArgs(client_id=self.client_id)
    result = self._context.SendRequest("GetClient", args)
    return Client(data=result, context=self._context)
def render_path_alias(path):
    """ Render a known path-alias (used primarily for forced .php redirects) """
    redir = path_alias.get_redirect('/' + path)
    if not redir:
        raise http_error.NotFound("Path redirection not found")
    return redir
def free_shape(self, name, free=True, **kwargs):
    """Free/Fix shape parameters of a source.

    Parameters
    ----------
    name : str
        Source name.
    free : bool
        Choose whether to free (free=True) or fix (free=False).
    """
    src = self.roi.get_source_by_name(name)
    self.free_source(name, free=free,
                     pars=shape_parameters[src['SpectrumType']],
                     **kwargs)
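Illustrative calls on a fermipy-style analysis object; the `gta` handle and the source name are invented:

gta.free_shape('3FGL J0534.5+2201', free=True)   # free the source's shape parameters
gta.free_shape('3FGL J0534.5+2201', free=False)  # fix them again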
def _set_rp_cand_interface(self, v, load=False):
    """
    Setter method for rp_cand_interface, mapped from YANG variable
    /rbridge_id/router/hide_pim_holder/pim/rp_candidate/rp_cand_interface (list)
    If this variable is read-only (config: false) in the source YANG file,
    then _set_rp_cand_interface is considered as a private method. Backends
    looking to populate this variable should do so via calling
    thisObj._set_rp_cand_interface() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=YANGListType("rp_cand_intf_type rp_cand_intf_id", rp_cand_interface.rp_cand_interface, yang_name="rp-cand-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rp-cand-intf-type rp-cand-intf-id', extensions={u'tailf-common': {u'info': u'Interface information', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-compact-syntax': None, u'callpoint': u'PimCandRpCfgCallpoint'}}), is_container='list', yang_name="rp-cand-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface information', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-compact-syntax': None, u'callpoint': u'PimCandRpCfgCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """rp_cand_interface must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("rp_cand_intf_type rp_cand_intf_id",rp_cand_interface.rp_cand_interface, yang_name="rp-cand-interface", rest_name="interface", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rp-cand-intf-type rp-cand-intf-id', extensions={u'tailf-common': {u'info': u'Interface information', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-compact-syntax': None, u'callpoint': u'PimCandRpCfgCallpoint'}}), is_container='list', yang_name="rp-cand-interface", rest_name="interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Interface information', u'cli-suppress-mode': None, u'cli-suppress-list-no': None, u'alt-name': u'interface', u'cli-compact-syntax': None, u'callpoint': u'PimCandRpCfgCallpoint'}}, namespace='urn:brocade.com:mgmt:brocade-pim', defining_module='brocade-pim', yang_type='list', is_config=True)""",
        })
    self.__rp_cand_interface = t
    if hasattr(self, '_set'):
        self._set()
def split_pks(cols):
    """Returns a 2-tuple of tuples of ((primary_key_cols), (non_primary_key_cols))."""
    pks = []
    others = []
    for name, col in cols.items():
        if col["is_primary_key"] == "t":
            pks.append(name)
        else:
            others.append(name)
    return (tuple(pks), tuple(others))
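A quick usage example with a made-up `cols` mapping shaped the way the function expects ('t'/'f' flags):

cols = {
    "id":   {"is_primary_key": "t"},
    "name": {"is_primary_key": "f"},
    "age":  {"is_primary_key": "f"},
}
print(split_pks(cols))  # (('id',), ('name', 'age'))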
def get_tape(self, start=0, end=10):
    '''Pretty prints the tape values'''
    self.tape_start = start
    self.tape_end = end
    self.tape_length = end - start
    tmp = '\n' + "|" + str(start) + "| "
    # Index relative to the full tape so that start > 0 still lines up
    # with cur_cell and reads the right cells.
    for i in xrange(start, start + len(self.tape[start:end])):
        if i == self.cur_cell:
            tmp += "[" + str(self.tape[i]) + "] "
        else:
            tmp += ":" + str(self.tape[i]) + ": "
    tmp += " |" + str(end) + "|"
    return tmp
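A tiny Python 2 demo (the method relies on `xrange`); `TapeMachine` is a stand-in class invented here to host it:

class TapeMachine(object):
    def __init__(self):
        self.tape = [0, 1, 2, 0]
        self.cur_cell = 2
    get_tape = get_tape  # attach the function above as a method

print(TapeMachine().get_tape(0, 4))
# |0| :0: :1: [2] :0:  |4|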
def vswitch_set(self, vswitch_name, **kwargs):
    """Change the configuration of an existing virtual switch

    :param str vswitch_name: the name of the virtual switch
    :param dict kwargs:
        - grant_userid=<value>: A userid to be added to the access list
        - user_vlan_id=<value>: user VLAN ID. Support following ways:
          1. As single values between 1 and 4094. A maximum of four values
             may be specified, separated by blanks.
             Example: 1010 2020 3030 4040
          2. As a range of two numbers, separated by a dash (-). A maximum
             of two ranges may be specified.
             Example: 10-12 20-22
        - revoke_userid=<value>: A userid to be removed from the access list
        - real_device_address=<value>: The real device address or the real
          device address and OSA Express port number of a QDIO OSA Express
          device to be used to create the switch to the virtual adapter.
          If using a real device and an OSA Express port number, specify
          the real device number followed by a period (.), the letter 'P'
          (or 'p'), followed by the port number as a hexadecimal number.
          A maximum of three device addresses, all 1-7 characters in
          length, may be specified, delimited by blanks. 'None' may also
          be specified
        - port_name=<value>: The name used to identify the OSA Expanded
          adapter. A maximum of three port names, all 1-8 characters in
          length, may be specified, delimited by blanks.
        - controller_name=<value>: One of the following:
          1. The userid controlling the real device. A maximum of eight
             userids, all 1-8 characters in length, may be specified,
             delimited by blanks.
          2. '*': Specifies that any available controller may be used
        - connection_value=<value>: One of the following values:
          CONnect: Activate the real device connection.
          DISCONnect: Do not activate the real device connection.
        - queue_memory_limit=<value>: A number between 1 and 8 specifying
          the QDIO buffer size in megabytes.
        - routing_value=<value>: Specifies whether the OSA-Express QDIO
          device will act as a router to the virtual switch, as follows:
          NONrouter: The OSA-Express device identified in
          real_device_address= will not act as a router to the vswitch
          PRIrouter: The OSA-Express device identified in
          real_device_address= will act as a primary router to the vswitch
        - port_type=<value>: Specifies the port type, ACCESS or TRUNK
        - persist=<value>: one of the following values:
          NO: The vswitch is updated on the active system, but is not
          updated in the permanent configuration for the system.
          YES: The vswitch is updated on the active system and also in
          the permanent configuration for the system.
          If not specified, the default is NO.
        - gvrp_value=<value>: GVRP or NOGVRP
        - mac_id=<value>: A unique identifier (up to six hexadecimal
          digits) used as part of the vswitch MAC address
        - uplink=<value>: One of the following:
          NO: The port being enabled is not the vswitch's UPLINK port.
          YES: The port being enabled is the vswitch's UPLINK port.
        - nic_userid=<value>: One of the following:
          1. The userid of the port to/from which the UPLINK port will
             be connected or disconnected. If a userid is specified,
             then nic_vdev= must also be specified
          2. '*': Disconnect the currently connected guest port to/from
             the special virtual switch UPLINK port. (This is equivalent
             to specifying NIC NONE on CP SET VSWITCH).
        - nic_vdev=<value>: The virtual device to/from which the UPLINK
          port will be connected/disconnected. If this value is
          specified, nic_userid= must also be specified, with a userid.
        - lacp=<value>: One of the following values:
          ACTIVE: Indicates that the virtual switch will initiate
          negotiations with the physical switch via the link aggregation
          control protocol (LACP) and will respond to LACP packets sent
          by the physical switch.
          INACTIVE: Indicates that aggregation is to be performed, but
          without LACP.
        - Interval=<value>: The interval to be used by the control
          program (CP) when doing load balancing of conversations across
          multiple links in the group. This can be any of the following
          values:
          1 - 9990: Indicates the number of seconds between load
          balancing operations across the link aggregation group.
          OFF: Indicates that no load balancing is done.
        - group_rdev=<value>: The real device address or the real device
          address and OSA Express port number of a QDIO OSA Express
          device to be affected within the link aggregation group
          associated with this vswitch. If using a real device and an OSA
          Express port number, specify the real device number followed by
          a period (.), the letter 'P' (or 'p'), followed by the port
          number as a hexadecimal number. A maximum of eight device
          addresses, all 1-7 characters in length, may be specified,
          delimited by blanks.
          Note: If a real device address is specified, this device will
          be added to the link aggregation group associated with this
          vswitch. (The link aggregation group will be created if it does
          not already exist.)
        - iptimeout=<value>: A number between 1 and 240 specifying the
          length of time in minutes that a remote IP address table entry
          remains in the IP address table for the virtual switch.
        - port_isolation=<value>: ON or OFF
        - promiscuous=<value>: One of the following:
          NO: The userid or port on the grant is not authorized to use
          the vswitch in promiscuous mode
          YES: The userid or port on the grant is authorized to use the
          vswitch in promiscuous mode.
        - MAC_protect=<value>: ON, OFF or UNSPECified
        - VLAN_counters=<value>: ON or OFF
    """
    for k in kwargs.keys():
        if k not in constants.SET_VSWITCH_KEYWORDS:
            errmsg = ('API vswitch_set: Invalid keyword %s' % k)
            raise exception.SDKInvalidInputFormat(msg=errmsg)

    self._networkops.set_vswitch(vswitch_name, **kwargs)
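An illustrative call; the `sdk_api` handle is hypothetical, while the keyword names come from the docstring above:

sdk_api.vswitch_set('VSW1', grant_userid='LINUX01', user_vlan_id='1010')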
def hum44(msg):
    """humidity

    Args:
        msg (String): 28-character hexadecimal message string

    Returns:
        float: percentage of humidity, [0 - 100] %
    """
    d = hex2bin(data(msg))

    if d[49] == '0':
        return None

    hm = bin2int(d[50:56]) * 100.0 / 64    # %

    return round(hm, 1)
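A self-contained sketch of the same bit arithmetic; `hex2bin` and `bin2int` are reimplemented here on the assumption that they behave like the pyModeS-style helpers the function calls:

def hex2bin(hexstr):
    # 4 bits per hex digit, zero-padded on the left
    return bin(int(hexstr, 16))[2:].zfill(len(hexstr) * 4)

def bin2int(binstr):
    return int(binstr, 2)

d = "0" * 49 + "1" + "100000"  # status bit set, 6-bit humidity field = 32
print(round(bin2int(d[50:56]) * 100.0 / 64, 1))  # -> 50.0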
def filters_query(filters):
    """ Turn the tuple of filters into SQL WHERE statements

    The key (column name) & operator have already been vetted
    so they can be trusted, but the value could still be evil so
    it MUST be a parameterized input! That is done by creating a
    param dict where the key name & val look like:

        '{}_{}'.format(key, oper): val

    The key is constructed the way it is to ensure uniqueness;
    if we just used the key name then it could get clobbered.

    Ultimately the WHERE statement will look something like:

        age >= {age_gte}

    where age_gte is the key name in the param dict with a value
    of the evil user input.

    In the end, a string statement & dict param are returned as
    a tuple if any filters were provided, otherwise None.

    :return: tuple (string, dict)
    """
    def _cast_val(filtr):
        """ Perform any needed casting on the filter value

        This could be tasks like including '%' signs at certain
        anchor points based on the filter or even wrapping it in
        certain functions.
        """
        val = filtr.val
        if filtr.oper in ('contains', 'icontains'):
            val = '%' + filtr.val + '%'
        elif filtr.oper == 'endswith':
            val = '%' + filtr.val
        elif filtr.oper == 'startswith':
            val = filtr.val + '%'
        return val

    def _filter(filtr):
        """ Process each individual Filter object """
        oper = FILTER_TABLE[filtr.oper]
        prop = '{field}_{oper}'.format(
            field=filtr.field.replace('.', '_'),
            oper=filtr.oper,
        )
        if isinstance(filtr, FilterRel):
            stmt = _filter_rel(filtr, oper, prop)
        else:
            stmt = '{field} {oper} %({prop})s'.format(
                field=filtr.field,
                oper=oper,
                prop=prop,
            )
        return stmt, {prop: _cast_val(filtr)}

    def _filter_or(filters):
        """ Given a FilterOr object return a SQL query """
        param = {}
        stmts = []
        for filtr in filters:
            vals = _filter(filtr)
            param.update(vals[1])
            stmts.append(vals[0])
        stmt = ' OR '.join(stmts)
        stmt = '({})'.format(stmt)
        return stmt, param

    def _filter_rel(rel, oper, prop):
        """ Given a FilterRel object return a SQL sub query """
        stmt = """
               {field} IN (SELECT {foreign_field}
                           FROM {foreign_rtype}
                           WHERE {foreign_filter} {oper} %({prop})s)
               """
        return stmt.format(
            field=rel.local_field,
            foreign_field=rel.foreign_field,
            foreign_filter=rel.foreign_filter,
            foreign_rtype=rel.foreign_rtype,
            oper=oper,
            prop=prop,
        )

    param = {}
    stmts = []
    for filtr in filters:
        if isinstance(filtr, FilterOr):
            vals = _filter_or(filtr)
        else:
            vals = _filter(filtr)
        param.update(vals[1])
        stmts.append(vals[0])

    if stmts:
        stmt = ' AND '.join(stmts)
        stmt = ' WHERE ' + stmt
        return stmt, param
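To make the output shape concrete, a hedged sketch; the Filter object, its 'gte' operator name, and the FILTER_TABLE mapping to '>=' are assumptions, not taken from the source:

# filters = (Filter(field='age', oper='gte', val=21),)
# filters_query(filters) would then return something like:
#   (' WHERE age >= %(age_gte)s', {'age_gte': 21})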
def append(self, row):
    """
    Appends a row to the list. Takes a dictionary, returns a row.
    """
    if isinstance(row, dict):
        row = self.Row(row)
    elif isinstance(row, self.Row):
        pass
    elif isinstance(row, SharePointListRow):
        raise TypeError("row must be a dict or an instance of SharePointList.Row, not SharePointListRow")
    else:
        raise TypeError("row must be a dict or an instance of SharePointList.Row")
    self.rows  # Make sure self._rows exists.
    self._rows.append(row)
    return row
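A hypothetical call; the `sp_list` handle and the 'Title' column are invented for illustration:

row = sp_list.append({'Title': 'New item'})  # the dict is wrapped into a Row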
def _build_host_livestate(self, host_name, livestate):
    # pylint: disable=no-self-use, too-many-locals
    """Build and notify the external command for a host livestate

    PROCESS_HOST_CHECK_RESULT;<host_name>;<status_code>;<plugin_output>

    :param host_name: the concerned host name
    :param livestate: livestate dictionary
    :return: external command line
    """
    state = livestate.get('state', 'UP').upper()
    output = livestate.get('output', '')
    long_output = livestate.get('long_output', '')
    perf_data = livestate.get('perf_data', '')
    try:
        # The non-numeric default makes int() raise, leaving the timestamp
        # unset when no usable value was provided.
        timestamp = int(livestate.get('timestamp', 'ABC'))
    except ValueError:
        timestamp = None

    host_state_to_id = {
        "UP": 0,
        "DOWN": 1,
        "UNREACHABLE": 2
    }

    parameters = '%s;%s' % (host_state_to_id.get(state, 3), output)
    if long_output and perf_data:
        parameters = '%s|%s\n%s' % (parameters, perf_data, long_output)
    elif long_output:
        parameters = '%s\n%s' % (parameters, long_output)
    elif perf_data:
        parameters = '%s|%s' % (parameters, perf_data)

    command_line = 'PROCESS_HOST_CHECK_RESULT;%s;%s' % (host_name, parameters)
    if timestamp is not None:
        command_line = '[%d] %s' % (timestamp, command_line)
    else:
        command_line = '[%d] %s' % (int(time.time()), command_line)

    return command_line
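For a concrete feel of the output, an illustrative call; the `broker` object and all values are invented:

livestate = {"state": "down", "output": "CRITICAL - host unreachable",
             "perf_data": "rta=0.0ms", "timestamp": 1500000000}
broker._build_host_livestate("srv01", livestate)
# -> '[1500000000] PROCESS_HOST_CHECK_RESULT;srv01;1;CRITICAL - host unreachable|rta=0.0ms'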
def plot_fit(self, intervals=True, **kwargs):
    """ Plots the fit of the model

    Parameters
    ----------
    intervals : Boolean
        Whether to plot 95% confidence interval of states

    Returns
    ----------
    None (plots data and the fit)
    """
    import matplotlib.pyplot as plt
    import seaborn as sns

    figsize = kwargs.get('figsize', (10, 7))
    series_type = kwargs.get('series_type', 'Smoothed')

    if self.latent_variables.estimated is False:
        raise Exception("No latent variables estimated!")
    else:
        date_index = copy.deepcopy(self.index)
        date_index = date_index[self.integ:self.data_original.shape[0] + 1]

        if series_type == 'Smoothed':
            mu, V = self.smoothed_state(self.data, self.latent_variables.get_z_values())
        elif series_type == 'Filtered':
            mu, V, _, _, _ = self._model(self.data, self.latent_variables.get_z_values())
        else:
            mu, V = self.smoothed_state(self.data, self.latent_variables.get_z_values())

        mu = mu[0][:-1]
        V = V.ravel()

        plt.figure(figsize=figsize)

        plt.subplot(3, 1, 1)
        plt.title(self.data_name + " Raw and " + series_type)

        if intervals == True:
            alpha = [0.15*i/float(100) for i in range(50, 12, -2)]
            plt.fill_between(date_index[2:],
                             mu[2:] + 1.98*np.sqrt(V[:-1][2:]),
                             mu[2:] - 1.98*np.sqrt(V[:-1][2:]),
                             alpha=0.15, label='95% C.I.')

        plt.plot(date_index, self.data, label='Data')
        plt.plot(date_index, mu, label=series_type, c='black')
        plt.legend(loc=2)

        plt.subplot(3, 1, 2)
        plt.title(self.data_name + " Local Level")

        if intervals == True:
            alpha = [0.15*i/float(100) for i in range(50, 12, -2)]
            plt.fill_between(date_index[2:],
                             mu[2:] + 1.98*np.sqrt(V[:-1][2:]),
                             mu[2:] - 1.98*np.sqrt(V[:-1][2:]),
                             alpha=0.15, label='95% C.I.')

        plt.plot(date_index, mu, label='Local Level')
        plt.legend(loc=2)

        plt.subplot(3, 1, 3)
        plt.title("Measurement Noise")
        plt.plot(date_index, self.data - mu)

        plt.show()
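Hypothetical usage on a pyflux-style local level model; the `pf.LLEV` constructor and the input series are assumptions, only plot_fit's own keywords appear in the source:

import pyflux as pf

model = pf.LLEV(data=my_series)  # my_series: any univariate time series
model.fit()
model.plot_fit(intervals=True, series_type='Filtered', figsize=(12, 8))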
def inspect(self):
    """
    Inspect access attempt, used for captcha flow
    :return:
    """
    last_attempt = self.get_last_failed_access_attempt(
        ip_address=self.ip,
        captcha_enabled=True,
        captcha_passed=False,
        is_expired=False
    )

    if last_attempt is None and not self.request.user.is_authenticated():
        # create a new entry
        user_access = self._FailedAccessAttemptModel(
            ip_address=self.ip,
            username=self.username,
            captcha_enabled=True,
            captcha_passed=False,
            is_expired=False
        )
    elif last_attempt:
        user_access = last_attempt

    if self.request.method == 'POST':
        if not self.request.user.is_authenticated():
            user_access.user_agent = self.request.META.get('HTTP_USER_AGENT', '<unknown user agent>')[:255]
            user_access.username = self.username
            user_access.failed_attempts += 1
            user_access.params_get = self.request.GET
            user_access.params_post = self.request.POST

            if user_access.failed_attempts >= self.max_failed_attempts:
                user_access.is_locked = True
            user_access.save()
        elif self.request.user.is_authenticated() and last_attempt:
            last_attempt.is_expired = True
            last_attempt.save()
def plot_color_legend(legend, horizontal=False, ax=None): """ Plot a pandas Series with labels and colors. Parameters ---------- legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis. """ import matplotlib.pyplot as plt import numpy as np t = np.array([np.array([x for x in legend])]) if ax is None: fig, ax = plt.subplots(1, 1) if horizontal: ax.imshow(t, interpolation='none') ax.set_yticks([]) ax.set_xticks(np.arange(0, legend.shape[0])) t = ax.set_xticklabels(legend.index) else: t = t.reshape([legend.shape[0], 1, 3]) ax.imshow(t, interpolation='none') ax.set_xticks([]) ax.set_yticks(np.arange(0, legend.shape[0])) t = ax.set_yticklabels(legend.index) return ax
Plot a pandas Series with labels and colors. Parameters ---------- legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis.
Below is the instruction that describes the task: ### Input: Plot a pandas Series with labels and colors. Parameters ---------- legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis. ### Response: def plot_color_legend(legend, horizontal=False, ax=None): """ Plot a pandas Series with labels and colors. Parameters ---------- legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis. """ import matplotlib.pyplot as plt import numpy as np t = np.array([np.array([x for x in legend])]) if ax is None: fig, ax = plt.subplots(1, 1) if horizontal: ax.imshow(t, interpolation='none') ax.set_yticks([]) ax.set_xticks(np.arange(0, legend.shape[0])) t = ax.set_xticklabels(legend.index) else: t = t.reshape([legend.shape[0], 1, 3]) ax.imshow(t, interpolation='none') ax.set_xticks([]) ax.set_yticks(np.arange(0, legend.shape[0])) t = ax.set_yticklabels(legend.index) return ax
def matrix_element(ji, fi, mi, jj, fj, mj, II, reduced_matrix_element, q=None, numeric=True, convention=1): r"""Calculate a matrix element of the electric dipole (in the helicity basis). We calculate the matrix element for the cyclical transition of the D2 line in Rb 87. >>> from sympy import symbols >>> red = symbols("r", positive=True) >>> half = 1/Integer(2) >>> II = 3*half >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, q=1, numeric=False) r/2 If no polarization component is specified, all are returned. >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, numeric=False) [0, 0, r/2] """ if q is None: return [matrix_element(ji, fi, mi, jj, fj, mj, II, reduced_matrix_element, qi, numeric=numeric, convention=convention) for qi in [-1, 0, 1]] if numeric: from numpy import sqrt as numsqrt sqrt = numsqrt else: from sympy import sqrt as symsqrt sqrt = symsqrt rpij = (-1)**(fi-mi) rpij *= wigner_3j(fi, 1, fj, -mi, q, mj) rpij *= (-1)**(fj+ji+1+II) rpij *= sqrt(2*fj+1) rpij *= sqrt(2*fi+1) rpij *= wigner_6j(ji, jj, 1, fj, fi, II) rpij *= reduced_matrix_element if convention == 2: rpij = rpij * sqrt(2*ji+1) if numeric: rpij = float(rpij) return rpij
r"""Calculate a matrix element of the electric dipole (in the helicity basis). We calculate the matrix element for the cyclical transition of the D2 line in Rb 87. >>> from sympy import symbols >>> red = symbols("r", positive=True) >>> half = 1/Integer(2) >>> II = 3*half >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, q=1, numeric=False) r/2 If no polarization component is specified, all are returned. >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, numeric=False) [0, 0, r/2]
Below is the instruction that describes the task: ### Input: r"""Calculate a matrix element of the electric dipole (in the helicity basis). We calculate the matrix element for the cyclical transition of the D2 line in Rb 87. >>> from sympy import symbols >>> red = symbols("r", positive=True) >>> half = 1/Integer(2) >>> II = 3*half >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, q=1, numeric=False) r/2 If no polarization component is specified, all are returned. >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, numeric=False) [0, 0, r/2] ### Response: def matrix_element(ji, fi, mi, jj, fj, mj, II, reduced_matrix_element, q=None, numeric=True, convention=1): r"""Calculate a matrix element of the electric dipole (in the helicity basis). We calculate the matrix element for the cyclical transition of the D2 line in Rb 87. >>> from sympy import symbols >>> red = symbols("r", positive=True) >>> half = 1/Integer(2) >>> II = 3*half >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, q=1, numeric=False) r/2 If no polarization component is specified, all are returned. >>> matrix_element(3*half, 3, 3, half, 2, 2, II, red, numeric=False) [0, 0, r/2] """ if q is None: return [matrix_element(ji, fi, mi, jj, fj, mj, II, reduced_matrix_element, qi, numeric=numeric, convention=convention) for qi in [-1, 0, 1]] if numeric: from numpy import sqrt as numsqrt sqrt = numsqrt else: from sympy import sqrt as symsqrt sqrt = symsqrt rpij = (-1)**(fi-mi) rpij *= wigner_3j(fi, 1, fj, -mi, q, mj) rpij *= (-1)**(fj+ji+1+II) rpij *= sqrt(2*fj+1) rpij *= sqrt(2*fi+1) rpij *= wigner_6j(ji, jj, 1, fj, fi, II) rpij *= reduced_matrix_element if convention == 2: rpij = rpij * sqrt(2*ji+1) if numeric: rpij = float(rpij) return rpij
def create_roots(self, yam): """Create the top-level structure for module `yam`.""" self.local_grammar = SchemaNode("grammar") self.local_grammar.attr = { "ns": yam.search_one("namespace").arg, "nma:module": self.module.arg} src_text = "YANG module '%s'" % yam.arg revs = yam.search("revision") if len(revs) > 0: src_text += " revision %s" % self.current_revision(revs) self.dc_element(self.local_grammar, "source", src_text) start = SchemaNode("start", self.local_grammar) self.data = SchemaNode("nma:data", start, interleave=True) self.data.occur = 2 self.rpcs = SchemaNode("nma:rpcs", start, interleave=False) self.notifications = SchemaNode("nma:notifications", start, interleave=False)
Create the top-level structure for module `yam`.
Below is the instruction that describes the task: ### Input: Create the top-level structure for module `yam`. ### Response: def create_roots(self, yam): """Create the top-level structure for module `yam`.""" self.local_grammar = SchemaNode("grammar") self.local_grammar.attr = { "ns": yam.search_one("namespace").arg, "nma:module": self.module.arg} src_text = "YANG module '%s'" % yam.arg revs = yam.search("revision") if len(revs) > 0: src_text += " revision %s" % self.current_revision(revs) self.dc_element(self.local_grammar, "source", src_text) start = SchemaNode("start", self.local_grammar) self.data = SchemaNode("nma:data", start, interleave=True) self.data.occur = 2 self.rpcs = SchemaNode("nma:rpcs", start, interleave=False) self.notifications = SchemaNode("nma:notifications", start, interleave=False)
def cudnnCreateTensorDescriptor(): """ Create a Tensor descriptor object. Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it. Returns ------- tensor_descriptor : int Tensor descriptor. """ tensor = ctypes.c_void_p() status = _libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(tensor)) cudnnCheckStatus(status) return tensor.value
Create a Tensor descriptor object. Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it. Returns ------- tensor_descriptor : int Tensor descriptor.
Below is the instruction that describes the task: ### Input: Create a Tensor descriptor object. Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it. Returns ------- tensor_descriptor : int Tensor descriptor. ### Response: def cudnnCreateTensorDescriptor(): """ Create a Tensor descriptor object. Allocates a cudnnTensorDescriptor_t structure and returns a pointer to it. Returns ------- tensor_descriptor : int Tensor descriptor. """ tensor = ctypes.c_void_p() status = _libcudnn.cudnnCreateTensorDescriptor(ctypes.byref(tensor)) cudnnCheckStatus(status) return tensor.value
def user(self, message): """ Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead. """ if Settings.UserLogs: self._write_log(Settings.UserLogPrefix, Settings.UserLogTime, message)
Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead.
Below is the instruction that describes the task: ### Input: Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead. ### Response: def user(self, message): """ Creates a user log (if user logging is turned on) Uses the log path defined by ``Debug.setUserLogFile()``. If no log file is defined, sends to STDOUT Note: Does *not* use Java string formatting like Sikuli. Format your message with Python ``basestring.format()`` instead. """ if Settings.UserLogs: self._write_log(Settings.UserLogPrefix, Settings.UserLogTime, message)
def _expand_help(self, action): """ This method is copied verbatim from ArgumentDefaultsHelpFormatter with a couple of lines added just before the end. Reason: we need to `repr()` default values instead of simply inserting them as is. This helps notice, for example, an empty string as the default value; moreover, it prevents breaking argparse due to logical quirks inside of its formatters. Ideally this could be achieved by simply defining :attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately argparse only supports the old printf syntax. """ params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is argparse.SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str # XXX this is added in Argh vs. argparse.ArgumentDefaultsHelpFormatter # (avoiding empty strings, otherwise Argparse would die with # an IndexError in _format_action) # if 'default' in params: if params['default'] is None: params['default'] = '-' else: params['default'] = repr(params['default']) # # / return self._get_help_string(action) % params
This method is copied verbatim from ArgumentDefaultsHelpFormatter with a couple of lines added just before the end. Reason: we need to `repr()` default values instead of simply inserting them as is. This helps notice, for example, an empty string as the default value; moreover, it prevents breaking argparse due to logical quirks inside of its formatters. Ideally this could be achieved by simply defining :attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately argparse only supports the old printf syntax.
Below is the instruction that describes the task: ### Input: This method is copied verbatim from ArgumentDefaultsHelpFormatter with a couple of lines added just before the end. Reason: we need to `repr()` default values instead of simply inserting them as is. This helps notice, for example, an empty string as the default value; moreover, it prevents breaking argparse due to logical quirks inside of its formatters. Ideally this could be achieved by simply defining :attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately argparse only supports the old printf syntax. ### Response: def _expand_help(self, action): """ This method is copied verbatim from ArgumentDefaultsHelpFormatter with a couple of lines added just before the end. Reason: we need to `repr()` default values instead of simply inserting them as is. This helps notice, for example, an empty string as the default value; moreover, it prevents breaking argparse due to logical quirks inside of its formatters. Ideally this could be achieved by simply defining :attr:`DEFAULT_ARGUMENT_TEMPLATE` as ``{default!r}`` but unfortunately argparse only supports the old printf syntax. """ params = dict(vars(action), prog=self._prog) for name in list(params): if params[name] is argparse.SUPPRESS: del params[name] for name in list(params): if hasattr(params[name], '__name__'): params[name] = params[name].__name__ if params.get('choices') is not None: choices_str = ', '.join([str(c) for c in params['choices']]) params['choices'] = choices_str # XXX this is added in Argh vs. argparse.ArgumentDefaultsHelpFormatter # (avoiding empty strings, otherwise Argparse would die with # an IndexError in _format_action) # if 'default' in params: if params['default'] is None: params['default'] = '-' else: params['default'] = repr(params['default']) # # / return self._get_help_string(action) % params
def _GetDataStreams(self): """Retrieves the data streams. Returns: list[TSKDataStream]: data streams. """ if self._data_streams is None: if self._file_system.IsHFS(): known_data_attribute_types = [ pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT, pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA] elif self._file_system.IsNTFS(): known_data_attribute_types = [pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA] else: known_data_attribute_types = None self._data_streams = [] tsk_fs_meta_type = getattr( self._tsk_file.info.meta, 'type', pytsk3.TSK_FS_META_TYPE_UNDEF) if not known_data_attribute_types: if tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG: data_stream = TSKDataStream(self._file_system, None) self._data_streams.append(data_stream) else: for tsk_attribute in self._tsk_file: # NTFS allows directories to have data streams. if (not self._file_system.IsNTFS() and tsk_fs_meta_type != pytsk3.TSK_FS_META_TYPE_REG): continue if getattr(tsk_attribute, 'info', None) is None: continue attribute_type = getattr(tsk_attribute.info, 'type', None) if attribute_type in known_data_attribute_types: data_stream = TSKDataStream(self._file_system, tsk_attribute) self._data_streams.append(data_stream) return self._data_streams
Retrieves the data streams. Returns: list[TSKDataStream]: data streams.
Below is the instruction that describes the task: ### Input: Retrieves the data streams. Returns: list[TSKDataStream]: data streams. ### Response: def _GetDataStreams(self): """Retrieves the data streams. Returns: list[TSKDataStream]: data streams. """ if self._data_streams is None: if self._file_system.IsHFS(): known_data_attribute_types = [ pytsk3.TSK_FS_ATTR_TYPE_HFS_DEFAULT, pytsk3.TSK_FS_ATTR_TYPE_HFS_DATA] elif self._file_system.IsNTFS(): known_data_attribute_types = [pytsk3.TSK_FS_ATTR_TYPE_NTFS_DATA] else: known_data_attribute_types = None self._data_streams = [] tsk_fs_meta_type = getattr( self._tsk_file.info.meta, 'type', pytsk3.TSK_FS_META_TYPE_UNDEF) if not known_data_attribute_types: if tsk_fs_meta_type == pytsk3.TSK_FS_META_TYPE_REG: data_stream = TSKDataStream(self._file_system, None) self._data_streams.append(data_stream) else: for tsk_attribute in self._tsk_file: # NTFS allows directories to have data streams. if (not self._file_system.IsNTFS() and tsk_fs_meta_type != pytsk3.TSK_FS_META_TYPE_REG): continue if getattr(tsk_attribute, 'info', None) is None: continue attribute_type = getattr(tsk_attribute.info, 'type', None) if attribute_type in known_data_attribute_types: data_stream = TSKDataStream(self._file_system, tsk_attribute) self._data_streams.append(data_stream) return self._data_streams
def kill(self): """ Delete my persistent file (i.e. pickle file), if it exists. """ if os.path.isfile(self.filename): os.remove(self.filename) return
Delete my persistent file (i.e. pickle file), if it exists.
Below is the instruction that describes the task: ### Input: Delete my persistent file (i.e. pickle file), if it exists. ### Response: def kill(self): """ Delete my persistent file (i.e. pickle file), if it exists. """ if os.path.isfile(self.filename): os.remove(self.filename) return
def focus(self, focus: Optional[URIPARM]) -> None: """ Set the focus node(s). If no focus node is specified, the evaluation will occur for all non-BNode graph subjects. Otherwise it can be a string, a URIRef or a list of string/URIRef combinations :param focus: None if focus should be all URIRefs in the graph otherwise a URI or list of URI's """ self._focus = normalize_uriparm(focus) if focus else None
Set the focus node(s). If no focus node is specified, the evaluation will occur for all non-BNode graph subjects. Otherwise it can be a string, a URIRef or a list of string/URIRef combinations :param focus: None if focus should be all URIRefs in the graph otherwise a URI or list of URI's
Below is the instruction that describes the task: ### Input: Set the focus node(s). If no focus node is specified, the evaluation will occur for all non-BNode graph subjects. Otherwise it can be a string, a URIRef or a list of string/URIRef combinations :param focus: None if focus should be all URIRefs in the graph otherwise a URI or list of URI's ### Response: def focus(self, focus: Optional[URIPARM]) -> None: """ Set the focus node(s). If no focus node is specified, the evaluation will occur for all non-BNode graph subjects. Otherwise it can be a string, a URIRef or a list of string/URIRef combinations :param focus: None if focus should be all URIRefs in the graph otherwise a URI or list of URI's """ self._focus = normalize_uriparm(focus) if focus else None
def get_duration_h_m(start: Union[str, DateTime], end: Union[str, DateTime], default: str = "N/A") -> str: """ Calculate the time between two dates/times expressed as strings. Args: start: start date/time end: end date/time default: string value to return in case either of the inputs is ``None`` Returns: a string that is one of .. code-block:: 'hh:mm' '-hh:mm' default """ start = coerce_to_pendulum(start) end = coerce_to_pendulum(end) if start is None or end is None: return default duration = end - start minutes = duration.in_minutes() (hours, minutes) = divmod(minutes, 60) if hours < 0: # negative... trickier # Python's divmod does interesting things with negative numbers: # Hours will be negative, and minutes always positive hours += 1 minutes = 60 - minutes return "-{}:{}".format(hours, "00" if minutes == 0 else minutes) else: return "{}:{}".format(hours, "00" if minutes == 0 else minutes)
Calculate the time between two dates/times expressed as strings. Args: start: start date/time end: end date/time default: string value to return in case either of the inputs is ``None`` Returns: a string that is one of .. code-block:: 'hh:mm' '-hh:mm' default
Below is the instruction that describes the task: ### Input: Calculate the time between two dates/times expressed as strings. Args: start: start date/time end: end date/time default: string value to return in case either of the inputs is ``None`` Returns: a string that is one of .. code-block:: 'hh:mm' '-hh:mm' default ### Response: def get_duration_h_m(start: Union[str, DateTime], end: Union[str, DateTime], default: str = "N/A") -> str: """ Calculate the time between two dates/times expressed as strings. Args: start: start date/time end: end date/time default: string value to return in case either of the inputs is ``None`` Returns: a string that is one of .. code-block:: 'hh:mm' '-hh:mm' default """ start = coerce_to_pendulum(start) end = coerce_to_pendulum(end) if start is None or end is None: return default duration = end - start minutes = duration.in_minutes() (hours, minutes) = divmod(minutes, 60) if hours < 0: # negative... trickier # Python's divmod does interesting things with negative numbers: # Hours will be negative, and minutes always positive hours += 1 minutes = 60 - minutes return "-{}:{}".format(hours, "00" if minutes == 0 else minutes) else: return "{}:{}".format(hours, "00" if minutes == 0 else minutes)
def _update_file_args(self, file_mapping): """Adjust the arguments to deal with staging files to the scratch area""" for key, value in self.args.items(): new_value = file_mapping.get(value, value) if new_value != value: self.args[key] = new_value
Adjust the arguments to deal with staging files to the scratch area
Below is the instruction that describes the task: ### Input: Adjust the arguments to deal with staging files to the scratch area ### Response: def _update_file_args(self, file_mapping): """Adjust the arguments to deal with staging files to the scratch area""" for key, value in self.args.items(): new_value = file_mapping.get(value, value) if new_value != value: self.args[key] = new_value
def min(self): """Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type. """ if self.is_quantized or self.base_dtype in ( bool, string, complex64, complex128, ): raise TypeError("Cannot find minimum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check # float and int types separately try: return np.finfo(self.as_numpy_dtype()).min except: # bare except as possible raises by finfo not documented try: return np.iinfo(self.as_numpy_dtype()).min except: if self.base_dtype == bfloat16: return _np_bfloat16(float.fromhex("-0x1.FEp127")) raise TypeError("Cannot find minimum value of %s." % self)
Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type.
Below is the instruction that describes the task: ### Input: Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type. ### Response: def min(self): """Returns the minimum representable value in this data type. Raises: TypeError: if this is a non-numeric, unordered, or quantized type. """ if self.is_quantized or self.base_dtype in ( bool, string, complex64, complex128, ): raise TypeError("Cannot find minimum value of %s." % self) # there is no simple way to get the min value of a dtype, we have to check # float and int types separately try: return np.finfo(self.as_numpy_dtype()).min except: # bare except as possible raises by finfo not documented try: return np.iinfo(self.as_numpy_dtype()).min except: if self.base_dtype == bfloat16: return _np_bfloat16(float.fromhex("-0x1.FEp127")) raise TypeError("Cannot find minimum value of %s." % self)
def wait_for_ilo_after_reset(ilo_object): """Continuously polls for iLO to come up after reset.""" is_ilo_up_after_reset = lambda: ilo_object.get_product_name() is not None is_ilo_up_after_reset.__name__ = 'is_ilo_up_after_reset' wait_for_operation_to_complete( is_ilo_up_after_reset, failover_exc=exception.IloConnectionError, failover_msg='iLO is not up after reset.' )
Continuously polls for iLO to come up after reset.
Below is the instruction that describes the task: ### Input: Continuously polls for iLO to come up after reset. ### Response: def wait_for_ilo_after_reset(ilo_object): """Continuously polls for iLO to come up after reset.""" is_ilo_up_after_reset = lambda: ilo_object.get_product_name() is not None is_ilo_up_after_reset.__name__ = 'is_ilo_up_after_reset' wait_for_operation_to_complete( is_ilo_up_after_reset, failover_exc=exception.IloConnectionError, failover_msg='iLO is not up after reset.' )
def split_surface_u(obj, param, **kwargs): """ Splits the surface at the input parametric coordinate on the u-direction. This method splits the surface into two pieces at the given parametric coordinate on the u-direction, generates two different surface objects and returns them. It does not modify the input surface. Keyword Arguments: * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear` * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot` :param obj: surface :type obj: abstract.Surface :param param: parameter for the u-direction :type param: float :return: a list of surface patches :rtype: list """ # Validate input if not isinstance(obj, abstract.Surface): raise GeomdlException("Input shape must be an instance of abstract.Surface class") if param == obj.knotvector_u[0] or param == obj.knotvector_u[-1]: raise GeomdlException("Cannot split on the edge") # Keyword arguments span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm # Find multiplicity of the knot ks = span_func(obj.degree_u, obj.knotvector_u, obj.ctrlpts_size_u, param) - obj.degree_u + 1 s = helpers.find_multiplicity(param, obj.knotvector_u) r = obj.degree_u - s # Create backups of the original surface temp_obj = copy.deepcopy(obj) # Split the original surface insert_knot_func(temp_obj, [param, None], num=[r, 0], check_num=False) # Knot vectors knot_span = span_func(temp_obj.degree_u, temp_obj.knotvector_u, temp_obj.ctrlpts_size_u, param) + 1 surf1_kv = list(temp_obj.knotvector_u[0:knot_span]) surf1_kv.append(param) surf2_kv = list(temp_obj.knotvector_u[knot_span:]) for _ in range(0, temp_obj.degree_u + 1): surf2_kv.insert(0, param) # Control points surf1_ctrlpts = temp_obj.ctrlpts2d[0:ks + r] surf2_ctrlpts = temp_obj.ctrlpts2d[ks + r - 1:] # Create a new surface for the first half surf1 = temp_obj.__class__() surf1.degree_u = temp_obj.degree_u surf1.degree_v = temp_obj.degree_v surf1.ctrlpts2d = surf1_ctrlpts surf1.knotvector_u = surf1_kv surf1.knotvector_v = temp_obj.knotvector_v # Create another surface for the second half surf2 = temp_obj.__class__() surf2.degree_u = temp_obj.degree_u surf2.degree_v = temp_obj.degree_v surf2.ctrlpts2d = surf2_ctrlpts surf2.knotvector_u = surf2_kv surf2.knotvector_v = temp_obj.knotvector_v # Return the new surfaces ret_val = [surf1, surf2] return ret_val
Splits the surface at the input parametric coordinate on the u-direction. This method splits the surface into two pieces at the given parametric coordinate on the u-direction, generates two different surface objects and returns them. It does not modify the input surface. Keyword Arguments: * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear` * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot` :param obj: surface :type obj: abstract.Surface :param param: parameter for the u-direction :type param: float :return: a list of surface patches :rtype: list
Below is the instruction that describes the task: ### Input: Splits the surface at the input parametric coordinate on the u-direction. This method splits the surface into two pieces at the given parametric coordinate on the u-direction, generates two different surface objects and returns them. It does not modify the input surface. Keyword Arguments: * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear` * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot` :param obj: surface :type obj: abstract.Surface :param param: parameter for the u-direction :type param: float :return: a list of surface patches :rtype: list ### Response: def split_surface_u(obj, param, **kwargs): """ Splits the surface at the input parametric coordinate on the u-direction. This method splits the surface into two pieces at the given parametric coordinate on the u-direction, generates two different surface objects and returns them. It does not modify the input surface. Keyword Arguments: * ``find_span_func``: FindSpan implementation. *Default:* :func:`.helpers.find_span_linear` * ``insert_knot_func``: knot insertion algorithm implementation. *Default:* :func:`.operations.insert_knot` :param obj: surface :type obj: abstract.Surface :param param: parameter for the u-direction :type param: float :return: a list of surface patches :rtype: list """ # Validate input if not isinstance(obj, abstract.Surface): raise GeomdlException("Input shape must be an instance of abstract.Surface class") if param == obj.knotvector_u[0] or param == obj.knotvector_u[-1]: raise GeomdlException("Cannot split on the edge") # Keyword arguments span_func = kwargs.get('find_span_func', helpers.find_span_linear) # FindSpan implementation insert_knot_func = kwargs.get('insert_knot_func', insert_knot) # Knot insertion algorithm # Find multiplicity of the knot ks = span_func(obj.degree_u, obj.knotvector_u, obj.ctrlpts_size_u, param) - obj.degree_u + 1 s = helpers.find_multiplicity(param, obj.knotvector_u) r = obj.degree_u - s # Create backups of the original surface temp_obj = copy.deepcopy(obj) # Split the original surface insert_knot_func(temp_obj, [param, None], num=[r, 0], check_num=False) # Knot vectors knot_span = span_func(temp_obj.degree_u, temp_obj.knotvector_u, temp_obj.ctrlpts_size_u, param) + 1 surf1_kv = list(temp_obj.knotvector_u[0:knot_span]) surf1_kv.append(param) surf2_kv = list(temp_obj.knotvector_u[knot_span:]) for _ in range(0, temp_obj.degree_u + 1): surf2_kv.insert(0, param) # Control points surf1_ctrlpts = temp_obj.ctrlpts2d[0:ks + r] surf2_ctrlpts = temp_obj.ctrlpts2d[ks + r - 1:] # Create a new surface for the first half surf1 = temp_obj.__class__() surf1.degree_u = temp_obj.degree_u surf1.degree_v = temp_obj.degree_v surf1.ctrlpts2d = surf1_ctrlpts surf1.knotvector_u = surf1_kv surf1.knotvector_v = temp_obj.knotvector_v # Create another surface for the second half surf2 = temp_obj.__class__() surf2.degree_u = temp_obj.degree_u surf2.degree_v = temp_obj.degree_v surf2.ctrlpts2d = surf2_ctrlpts surf2.knotvector_u = surf2_kv surf2.knotvector_v = temp_obj.knotvector_v # Return the new surfaces ret_val = [surf1, surf2] return ret_val
def number(self, phone_number, send_digits=None, url=None, method=None, status_callback_event=None, status_callback=None, status_callback_method=None, **kwargs): """ Create a <Number> element :param phone_number: Phone Number to dial :param send_digits: DTMF tones to play when the call is answered :param url: TwiML URL :param method: TwiML URL method :param status_callback_event: Events to call status callback :param status_callback: Status callback URL :param status_callback_method: Status callback URL method :param kwargs: additional attributes :returns: <Number> element """ return self.nest(Number( phone_number, send_digits=send_digits, url=url, method=method, status_callback_event=status_callback_event, status_callback=status_callback, status_callback_method=status_callback_method, **kwargs ))
Create a <Number> element :param phone_number: Phone Number to dial :param send_digits: DTMF tones to play when the call is answered :param url: TwiML URL :param method: TwiML URL method :param status_callback_event: Events to call status callback :param status_callback: Status callback URL :param status_callback_method: Status callback URL method :param kwargs: additional attributes :returns: <Number> element
Below is the instruction that describes the task: ### Input: Create a <Number> element :param phone_number: Phone Number to dial :param send_digits: DTMF tones to play when the call is answered :param url: TwiML URL :param method: TwiML URL method :param status_callback_event: Events to call status callback :param status_callback: Status callback URL :param status_callback_method: Status callback URL method :param kwargs: additional attributes :returns: <Number> element ### Response: def number(self, phone_number, send_digits=None, url=None, method=None, status_callback_event=None, status_callback=None, status_callback_method=None, **kwargs): """ Create a <Number> element :param phone_number: Phone Number to dial :param send_digits: DTMF tones to play when the call is answered :param url: TwiML URL :param method: TwiML URL method :param status_callback_event: Events to call status callback :param status_callback: Status callback URL :param status_callback_method: Status callback URL method :param kwargs: additional attributes :returns: <Number> element """ return self.nest(Number( phone_number, send_digits=send_digits, url=url, method=method, status_callback_event=status_callback_event, status_callback=status_callback, status_callback_method=status_callback_method, **kwargs ))
def touch(path, content="", encoding="utf-8", overwrite=False): """Create a file at the given path if it does not already exist. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if it exists. Returns: bool: True if the operation is successful, False otherwise. """ path = os.path.abspath(path) if not overwrite and os.path.exists(path): logger.warning('touch: "%s" already exists', path) return False try: logger.info("touch: %s", path) with io.open(path, "wb") as f: if not isinstance(content, six.binary_type): content = content.encode(encoding) f.write(content) return True except Exception as e: logger.error("touch: %s failed. Error: %s", path, e) return False
Create a file at the given path if it does not already exist. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if it exists. Returns: bool: True if the operation is successful, False otherwise.
Below is the instruction that describes the task: ### Input: Create a file at the given path if it does not already exist. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if it exists. Returns: bool: True if the operation is successful, False otherwise. ### Response: def touch(path, content="", encoding="utf-8", overwrite=False): """Create a file at the given path if it does not already exist. Args: path (str): Path to the file. content (str): Optional content that will be written in the file. encoding (str): Encoding in which to write the content. Default: ``utf-8`` overwrite (bool): Overwrite the file if it exists. Returns: bool: True if the operation is successful, False otherwise. """ path = os.path.abspath(path) if not overwrite and os.path.exists(path): logger.warning('touch: "%s" already exists', path) return False try: logger.info("touch: %s", path) with io.open(path, "wb") as f: if not isinstance(content, six.binary_type): content = content.encode(encoding) f.write(content) return True except Exception as e: logger.error("touch: %s failed. Error: %s", path, e) return False
def init(ctx, force): """Wizard to create a project-level configuration file.""" if os.path.exists(PROJECT_CONFIG) and not force: click.secho( 'An existing configuration file was found at "{}".\n' .format(PROJECT_CONFIG), fg='red', bold=True ) click.secho( 'Please remove it in order to run the setup wizard or use\n' 'the --force flag to overwrite it.' ) ctx.exit(1) project_key = click.prompt('Project key on the issue tracker') base_branch = click.prompt('Integration branch', default='master') virtualenvs = ('.venv', '.env', 'venv', 'env') for p in virtualenvs: if os.path.exists(os.path.join(p, 'bin', 'activate')): venv = p break else: venv = '' venv_path = click.prompt('Path to virtual environment', default=venv) project_id = click.prompt('Project ID on Harvest', type=int) task_id = click.prompt('Task id on Harvest', type=int) config = configparser.ConfigParser() config.add_section('lancet') config.set('lancet', 'virtualenv', venv_path) config.add_section('tracker') config.set('tracker', 'default_project', project_key) config.add_section('harvest') config.set('harvest', 'project_id', str(project_id)) config.set('harvest', 'task_id', str(task_id)) config.add_section('repository') config.set('repository', 'base_branch', base_branch) with open(PROJECT_CONFIG, 'w') as fh: config.write(fh) click.secho('\nConfiguration correctly written to "{}".' .format(PROJECT_CONFIG), fg='green')
Wizard to create a project-level configuration file.
Below is the instruction that describes the task: ### Input: Wizard to create a project-level configuration file. ### Response: def init(ctx, force): """Wizard to create a project-level configuration file.""" if os.path.exists(PROJECT_CONFIG) and not force: click.secho( 'An existing configuration file was found at "{}".\n' .format(PROJECT_CONFIG), fg='red', bold=True ) click.secho( 'Please remove it in order to run the setup wizard or use\n' 'the --force flag to overwrite it.' ) ctx.exit(1) project_key = click.prompt('Project key on the issue tracker') base_branch = click.prompt('Integration branch', default='master') virtualenvs = ('.venv', '.env', 'venv', 'env') for p in virtualenvs: if os.path.exists(os.path.join(p, 'bin', 'activate')): venv = p break else: venv = '' venv_path = click.prompt('Path to virtual environment', default=venv) project_id = click.prompt('Project ID on Harvest', type=int) task_id = click.prompt('Task id on Harvest', type=int) config = configparser.ConfigParser() config.add_section('lancet') config.set('lancet', 'virtualenv', venv_path) config.add_section('tracker') config.set('tracker', 'default_project', project_key) config.add_section('harvest') config.set('harvest', 'project_id', str(project_id)) config.set('harvest', 'task_id', str(task_id)) config.add_section('repository') config.set('repository', 'base_branch', base_branch) with open(PROJECT_CONFIG, 'w') as fh: config.write(fh) click.secho('\nConfiguration correctly written to "{}".' .format(PROJECT_CONFIG), fg='green')
def set_ticks(self, number): '''Set the number of frames to animate. ''' self.max_index = number self.current_index = 0 self.slider.setMaximum(self.max_index-1) self.slider.setMinimum(0) self.slider.setPageStep(1)
Set the number of frames to animate.
Below is the instruction that describes the task: ### Input: Set the number of frames to animate. ### Response: def set_ticks(self, number): '''Set the number of frames to animate. ''' self.max_index = number self.current_index = 0 self.slider.setMaximum(self.max_index-1) self.slider.setMinimum(0) self.slider.setPageStep(1)
def get(self, res_path, timeout=10.): """ Get operation. :param str res_path: Resource path. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body. """ resp = requests.get( self.__res_uri(res_path), headers=self.__headers(), verify=False, auth=self.__auth(), timeout=timeout ) return ( resp.status_code, json.loads(resp.text) if resp.status_code == 200 else {} )
Get operation. :param str res_path: Resource path. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body.
Below is the instruction that describes the task: ### Input: Get operation. :param str res_path: Resource path. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body. ### Response: def get(self, res_path, timeout=10.): """ Get operation. :param str res_path: Resource path. :param float timeout: Timeout in seconds. :rtype: tuple :return: Tuple with status code and response body. """ resp = requests.get( self.__res_uri(res_path), headers=self.__headers(), verify=False, auth=self.__auth(), timeout=timeout ) return ( resp.status_code, json.loads(resp.text) if resp.status_code == 200 else {} )
def _create_service_nwk(self, tenant_id, tenant_name, direc): """Function to create the service in network in DCNM. """ net_dict = self.retrieve_dcnm_net_info(tenant_id, direc) net = utils.Dict2Obj(net_dict) subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, direc) subnet = utils.Dict2Obj(subnet_dict) try: self.dcnm_obj.create_service_network(tenant_name, net, subnet) except dexc.DfaClientRequestFailed: LOG.error("Failed to create network in DCNM %s", direc) return False return True
Function to create the service in network in DCNM.
Below is the instruction that describes the task: ### Input: Function to create the service in network in DCNM. ### Response: def _create_service_nwk(self, tenant_id, tenant_name, direc): """Function to create the service in network in DCNM. """ net_dict = self.retrieve_dcnm_net_info(tenant_id, direc) net = utils.Dict2Obj(net_dict) subnet_dict = self.retrieve_dcnm_subnet_info(tenant_id, direc) subnet = utils.Dict2Obj(subnet_dict) try: self.dcnm_obj.create_service_network(tenant_name, net, subnet) except dexc.DfaClientRequestFailed: LOG.error("Failed to create network in DCNM %s", direc) return False return True
def simxGetObjectSelection(clientID, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' objectCount = ct.c_int() objectHandles = ct.POINTER(ct.c_int)() ret = c_GetObjectSelection(clientID, ct.byref(objectHandles), ct.byref(objectCount), operationMode) newobj = [] if ret == 0: for i in range(objectCount.value): newobj.append(objectHandles[i]) return ret, newobj
Please have a look at the function description/documentation in the V-REP user manual
Below is the instruction that describes the task: ### Input: Please have a look at the function description/documentation in the V-REP user manual ### Response: def simxGetObjectSelection(clientID, operationMode): ''' Please have a look at the function description/documentation in the V-REP user manual ''' objectCount = ct.c_int() objectHandles = ct.POINTER(ct.c_int)() ret = c_GetObjectSelection(clientID, ct.byref(objectHandles), ct.byref(objectCount), operationMode) newobj = [] if ret == 0: for i in range(objectCount.value): newobj.append(objectHandles[i]) return ret, newobj
def _tz_offset_string(offset): """(Internal) Convert TZ offset in minutes east to string.""" s = "" io = int(offset) if io == 0: s += "Z" else: if -1440 < io < 1440: ho = abs(io) / 60 mo = abs(io) % 60 s += "%c%02u" % ("+" if io > 0 else "-", ho) if mo != 0: s += ":%02u" % mo else: raise ValueError("Timezone `offset` (%u) out of range " "-1439 to +1439 minutes" % io) return s
(Internal) Convert TZ offset in minutes east to string.
Below is the instruction that describes the task: ### Input: (Internal) Convert TZ offset in minutes east to string. ### Response: def _tz_offset_string(offset): """(Internal) Convert TZ offset in minutes east to string.""" s = "" io = int(offset) if io == 0: s += "Z" else: if -1440 < io < 1440: ho = abs(io) / 60 mo = abs(io) % 60 s += "%c%02u" % ("+" if io > 0 else "-", ho) if mo != 0: s += ":%02u" % mo else: raise ValueError("Timezone `offset` (%u) out of range " "-1439 to +1439 minutes" % io) return s
def get_type_info(obj): """Get type information for a Python object Args: obj: The Python object Returns: tuple: (object type "category", object type name) """ if isinstance(obj, primitive_types): return ('primitive', type(obj).__name__) if isinstance(obj, sequence_types): return ('sequence', type(obj).__name__) if isinstance(obj, array_types): return ('array', type(obj).__name__) if isinstance(obj, key_value_types): return ('key-value', type(obj).__name__) if isinstance(obj, types.ModuleType): return ('module', type(obj).__name__) if isinstance(obj, (types.FunctionType, types.MethodType)): return ('function', type(obj).__name__) if isinstance(obj, type): if hasattr(obj, '__dict__'): return ('class', obj.__name__) if isinstance(type(obj), type): if hasattr(obj, '__dict__'): cls_name = type(obj).__name__ if cls_name == 'classobj': cls_name = obj.__name__ return ('class', '{}'.format(cls_name)) if cls_name == 'instance': cls_name = obj.__class__.__name__ return ('instance', '{} instance'.format(cls_name)) return ('unknown', type(obj).__name__)
Get type information for a Python object Args: obj: The Python object Returns: tuple: (object type "category", object type name)
Below is the instruction that describes the task: ### Input: Get type information for a Python object Args: obj: The Python object Returns: tuple: (object type "category", object type name) ### Response: def get_type_info(obj): """Get type information for a Python object Args: obj: The Python object Returns: tuple: (object type "category", object type name) """ if isinstance(obj, primitive_types): return ('primitive', type(obj).__name__) if isinstance(obj, sequence_types): return ('sequence', type(obj).__name__) if isinstance(obj, array_types): return ('array', type(obj).__name__) if isinstance(obj, key_value_types): return ('key-value', type(obj).__name__) if isinstance(obj, types.ModuleType): return ('module', type(obj).__name__) if isinstance(obj, (types.FunctionType, types.MethodType)): return ('function', type(obj).__name__) if isinstance(obj, type): if hasattr(obj, '__dict__'): return ('class', obj.__name__) if isinstance(type(obj), type): if hasattr(obj, '__dict__'): cls_name = type(obj).__name__ if cls_name == 'classobj': cls_name = obj.__name__ return ('class', '{}'.format(cls_name)) if cls_name == 'instance': cls_name = obj.__class__.__name__ return ('instance', '{} instance'.format(cls_name)) return ('unknown', type(obj).__name__)
def update_driver(self, prompt): """Update driver based on new prompt.""" prompt = prompt.lstrip() self.chain.connection.log("({}): Prompt: '{}'".format(self.driver.platform, prompt)) self.prompt = prompt driver_name = self.driver.update_driver(prompt) if driver_name is None: self.chain.connection.log("New driver not detected. Using existing {} driver.".format(self.driver.platform)) return self.driver_name = driver_name
Update driver based on new prompt.
Below is the instruction that describes the task: ### Input: Update driver based on new prompt. ### Response: def update_driver(self, prompt): """Update driver based on new prompt.""" prompt = prompt.lstrip() self.chain.connection.log("({}): Prompt: '{}'".format(self.driver.platform, prompt)) self.prompt = prompt driver_name = self.driver.update_driver(prompt) if driver_name is None: self.chain.connection.log("New driver not detected. Using existing {} driver.".format(self.driver.platform)) return self.driver_name = driver_name
def gen_random_mobile(): """ Randomly generate a mobile phone number :return: * str: (string) mobile phone number Example:: print('--- gen_random_mobile demo ---') print(gen_random_mobile()) print(gen_random_mobile()) print('---') Output:: --- gen_random_mobile demo --- 16706146773 14402633925 --- """ prefix_list = ["13", "1400", "1410", "1440", "145", "146", "147", "148", "15", "162", "165", "166", "167", "170", "171", "172", "173", "175", "176", "177", "178", "1740", "18", "191", "198", "199"] prefix_str = random.choice(prefix_list) return prefix_str + "".join(random.choice("0123456789") for _ in range(11 - len(prefix_str)))
Randomly generate a mobile phone number :return: * str: (string) mobile phone number Example:: print('--- gen_random_mobile demo ---') print(gen_random_mobile()) print(gen_random_mobile()) print('---') Output:: --- gen_random_mobile demo --- 16706146773 14402633925 ---
Below is the instruction that describes the task: ### Input: Randomly generate a mobile phone number :return: * str: (string) mobile phone number Example:: print('--- gen_random_mobile demo ---') print(gen_random_mobile()) print(gen_random_mobile()) print('---') Output:: --- gen_random_mobile demo --- 16706146773 14402633925 --- ### Response: def gen_random_mobile(): """ Randomly generate a mobile phone number :return: * str: (string) mobile phone number Example:: print('--- gen_random_mobile demo ---') print(gen_random_mobile()) print(gen_random_mobile()) print('---') Output:: --- gen_random_mobile demo --- 16706146773 14402633925 --- """ prefix_list = ["13", "1400", "1410", "1440", "145", "146", "147", "148", "15", "162", "165", "166", "167", "170", "171", "172", "173", "175", "176", "177", "178", "1740", "18", "191", "198", "199"] prefix_str = random.choice(prefix_list) return prefix_str + "".join(random.choice("0123456789") for _ in range(11 - len(prefix_str)))
def write_log(title, message=''): """Write formatted log message to stderr.""" sys.stderr.write(''.join([ title.center(40).center(60, '-'), '\n', message ]))
Write formatted log message to stderr.
Below is the instruction that describes the task: ### Input: Write formatted log message to stderr. ### Response: def write_log(title, message=''): """Write formatted log message to stderr.""" sys.stderr.write(''.join([ title.center(40).center(60, '-'), '\n', message ]))
def pre_dispatch(self): """ List of pre-dispatch methods from registered middleware. """ middleware = sort_by_priority(self) return tuple(m.pre_dispatch for m in middleware if hasattr(m, 'pre_dispatch'))
List of pre-dispatch methods from registered middleware.
Below is the instruction that describes the task: ### Input: List of pre-dispatch methods from registered middleware. ### Response: def pre_dispatch(self): """ List of pre-dispatch methods from registered middleware. """ middleware = sort_by_priority(self) return tuple(m.pre_dispatch for m in middleware if hasattr(m, 'pre_dispatch'))
def read_header(stream): """Return tuple(version_long, num_entries) from the given stream""" type_id = stream.read(4) if type_id != b"DIRC": raise AssertionError("Invalid index file header: %r" % type_id) version, num_entries = unpack(">LL", stream.read(4 * 2)) # TODO: handle version 3: extended data, see read-cache.c assert version in (1, 2) return version, num_entries
Return tuple(version_long, num_entries) from the given stream
Below is the instruction that describes the task: ### Input: Return tuple(version_long, num_entries) from the given stream ### Response: def read_header(stream): """Return tuple(version_long, num_entries) from the given stream""" type_id = stream.read(4) if type_id != b"DIRC": raise AssertionError("Invalid index file header: %r" % type_id) version, num_entries = unpack(">LL", stream.read(4 * 2)) # TODO: handle version 3: extended data, see read-cache.c assert version in (1, 2) return version, num_entries
def depth_first(self, top_down=True): """ Iterate depth-first. :: >>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode >>> root_container = UniqueTreeContainer(name="root") >>> outer_container = UniqueTreeContainer(name="outer") >>> inner_container = UniqueTreeContainer(name="inner") >>> node_a = UniqueTreeNode(name="a") >>> node_b = UniqueTreeNode(name="b") >>> node_c = UniqueTreeNode(name="c") >>> node_d = UniqueTreeNode(name="d") >>> root_container.extend([node_a, outer_container]) >>> outer_container.extend([inner_container, node_d]) >>> inner_container.extend([node_b, node_c]) :: >>> for node in root_container.depth_first(): ... print(node.name) ... a outer inner b c d :: >>> for node in root_container.depth_first(top_down=False): ... print(node.name) ... a b c inner d outer """ for child in tuple(self): if top_down: yield child if isinstance(child, UniqueTreeContainer): yield from child.depth_first(top_down=top_down) if not top_down: yield child
Iterate depth-first. :: >>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode >>> root_container = UniqueTreeContainer(name="root") >>> outer_container = UniqueTreeContainer(name="outer") >>> inner_container = UniqueTreeContainer(name="inner") >>> node_a = UniqueTreeNode(name="a") >>> node_b = UniqueTreeNode(name="b") >>> node_c = UniqueTreeNode(name="c") >>> node_d = UniqueTreeNode(name="d") >>> root_container.extend([node_a, outer_container]) >>> outer_container.extend([inner_container, node_d]) >>> inner_container.extend([node_b, node_c]) :: >>> for node in root_container.depth_first(): ... print(node.name) ... a outer inner b c d :: >>> for node in root_container.depth_first(top_down=False): ... print(node.name) ... a b c inner d outer
Below is the instruction that describes the task: ### Input: Iterate depth-first. :: >>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode >>> root_container = UniqueTreeContainer(name="root") >>> outer_container = UniqueTreeContainer(name="outer") >>> inner_container = UniqueTreeContainer(name="inner") >>> node_a = UniqueTreeNode(name="a") >>> node_b = UniqueTreeNode(name="b") >>> node_c = UniqueTreeNode(name="c") >>> node_d = UniqueTreeNode(name="d") >>> root_container.extend([node_a, outer_container]) >>> outer_container.extend([inner_container, node_d]) >>> inner_container.extend([node_b, node_c]) :: >>> for node in root_container.depth_first(): ... print(node.name) ... a outer inner b c d :: >>> for node in root_container.depth_first(top_down=False): ... print(node.name) ... a b c inner d outer ### Response: def depth_first(self, top_down=True): """ Iterate depth-first. :: >>> from uqbar.containers import UniqueTreeContainer, UniqueTreeNode >>> root_container = UniqueTreeContainer(name="root") >>> outer_container = UniqueTreeContainer(name="outer") >>> inner_container = UniqueTreeContainer(name="inner") >>> node_a = UniqueTreeNode(name="a") >>> node_b = UniqueTreeNode(name="b") >>> node_c = UniqueTreeNode(name="c") >>> node_d = UniqueTreeNode(name="d") >>> root_container.extend([node_a, outer_container]) >>> outer_container.extend([inner_container, node_d]) >>> inner_container.extend([node_b, node_c]) :: >>> for node in root_container.depth_first(): ... print(node.name) ... a outer inner b c d :: >>> for node in root_container.depth_first(top_down=False): ... print(node.name) ... a b c inner d outer """ for child in tuple(self): if top_down: yield child if isinstance(child, UniqueTreeContainer): yield from child.depth_first(top_down=top_down) if not top_down: yield child
def models(cls, api_version=DEFAULT_API_VERSION): """Module depends on the API version: * 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.network.v2015_06_15.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.network.v2016_09_01.models>` * 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.network.v2016_12_01.models>` * 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.network.v2017_03_01.models>` * 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.network.v2017_06_01.models>` * 2017-08-01: :mod:`v2017_08_01.models<azure.mgmt.network.v2017_08_01.models>` * 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.network.v2017_09_01.models>` * 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.network.v2017_10_01.models>` * 2017-11-01: :mod:`v2017_11_01.models<azure.mgmt.network.v2017_11_01.models>` * 2018-01-01: :mod:`v2018_01_01.models<azure.mgmt.network.v2018_01_01.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.network.v2018_02_01.models>` * 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.network.v2018_04_01.models>` """ if api_version == '2015-06-15': from .v2015_06_15 import models return models elif api_version == '2016-09-01': from .v2016_09_01 import models return models elif api_version == '2016-12-01': from .v2016_12_01 import models return models elif api_version == '2017-03-01': from .v2017_03_01 import models return models elif api_version == '2017-06-01': from .v2017_06_01 import models return models elif api_version == '2017-08-01': from .v2017_08_01 import models return models elif api_version == '2017-09-01': from .v2017_09_01 import models return models elif api_version == '2017-10-01': from .v2017_10_01 import models return models elif api_version == '2017-11-01': from .v2017_11_01 import models return models elif api_version == '2018-01-01': from .v2018_01_01 import models return models elif api_version == '2018-02-01': from .v2018_02_01 import models return models elif api_version == '2018-04-01': from .v2018_04_01 import models return models raise NotImplementedError("APIVersion {} is not available".format(api_version))
Module depends on the API version: * 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.network.v2015_06_15.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.network.v2016_09_01.models>` * 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.network.v2016_12_01.models>` * 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.network.v2017_03_01.models>` * 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.network.v2017_06_01.models>` * 2017-08-01: :mod:`v2017_08_01.models<azure.mgmt.network.v2017_08_01.models>` * 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.network.v2017_09_01.models>` * 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.network.v2017_10_01.models>` * 2017-11-01: :mod:`v2017_11_01.models<azure.mgmt.network.v2017_11_01.models>` * 2018-01-01: :mod:`v2018_01_01.models<azure.mgmt.network.v2018_01_01.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.network.v2018_02_01.models>` * 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.network.v2018_04_01.models>`
Below is the the instruction that describes the task: ### Input: Module depends on the API version: * 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.network.v2015_06_15.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.network.v2016_09_01.models>` * 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.network.v2016_12_01.models>` * 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.network.v2017_03_01.models>` * 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.network.v2017_06_01.models>` * 2017-08-01: :mod:`v2017_08_01.models<azure.mgmt.network.v2017_08_01.models>` * 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.network.v2017_09_01.models>` * 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.network.v2017_10_01.models>` * 2017-11-01: :mod:`v2017_11_01.models<azure.mgmt.network.v2017_11_01.models>` * 2018-01-01: :mod:`v2018_01_01.models<azure.mgmt.network.v2018_01_01.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.network.v2018_02_01.models>` * 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.network.v2018_04_01.models>` ### Response: def models(cls, api_version=DEFAULT_API_VERSION): """Module depends on the API version: * 2015-06-15: :mod:`v2015_06_15.models<azure.mgmt.network.v2015_06_15.models>` * 2016-09-01: :mod:`v2016_09_01.models<azure.mgmt.network.v2016_09_01.models>` * 2016-12-01: :mod:`v2016_12_01.models<azure.mgmt.network.v2016_12_01.models>` * 2017-03-01: :mod:`v2017_03_01.models<azure.mgmt.network.v2017_03_01.models>` * 2017-06-01: :mod:`v2017_06_01.models<azure.mgmt.network.v2017_06_01.models>` * 2017-08-01: :mod:`v2017_08_01.models<azure.mgmt.network.v2017_08_01.models>` * 2017-09-01: :mod:`v2017_09_01.models<azure.mgmt.network.v2017_09_01.models>` * 2017-10-01: :mod:`v2017_10_01.models<azure.mgmt.network.v2017_10_01.models>` * 2017-11-01: :mod:`v2017_11_01.models<azure.mgmt.network.v2017_11_01.models>` * 2018-01-01: :mod:`v2018_01_01.models<azure.mgmt.network.v2018_01_01.models>` * 2018-02-01: :mod:`v2018_02_01.models<azure.mgmt.network.v2018_02_01.models>` * 2018-04-01: :mod:`v2018_04_01.models<azure.mgmt.network.v2018_04_01.models>` """ if api_version == '2015-06-15': from .v2015_06_15 import models return models elif api_version == '2016-09-01': from .v2016_09_01 import models return models elif api_version == '2016-12-01': from .v2016_12_01 import models return models elif api_version == '2017-03-01': from .v2017_03_01 import models return models elif api_version == '2017-06-01': from .v2017_06_01 import models return models elif api_version == '2017-08-01': from .v2017_08_01 import models return models elif api_version == '2017-09-01': from .v2017_09_01 import models return models elif api_version == '2017-10-01': from .v2017_10_01 import models return models elif api_version == '2017-11-01': from .v2017_11_01 import models return models elif api_version == '2018-01-01': from .v2018_01_01 import models return models elif api_version == '2018-02-01': from .v2018_02_01 import models return models elif api_version == '2018-04-01': from .v2018_04_01 import models return models raise NotImplementedError("APIVersion {} is not available".format(api_version))
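A minimal sketch of how a version-dispatching classmethod like the one above is typically consumed; the client class name and the `Subnet` construction are illustrative assumptions, not taken from the snippet itself.

# Hypothetical usage of the multi-API models() dispatcher shown above.
from azure.mgmt.network import NetworkManagementClient  # assumed client class

models = NetworkManagementClient.models(api_version='2018-04-01')
# Every model class for that API version now comes from one place, so
# downstream code never hard-codes a versioned module path.
subnet = models.Subnet(address_prefix='10.0.0.0/24')  # assumed model signature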
def splitStis(stisfile, sci_count):
    """
    Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list with the names of the new flt files.
    """
    newfiles = []
    toclose = False
    if isinstance(stisfile, str):
        f = fits.open(stisfile)
        toclose = True
    else:
        f = stisfile
    hdu0 = f[0].copy()
    stisfilename = f.filename()
    for count in range(1,sci_count+1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci',count)].copy()
        fitsobj.append(hdu)

        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err',count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq',count)].copy()
            fitsobj.append(hdu)
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' %(count, stisfile))
            print('contain all required image extensions. Each must contain')
            print('populated SCI, ERR and DQ arrays.')

            continue

        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if (os.path.exists(newfilename)):
            os.remove(newfilename)
            print(" Replacing "+newfilename+"...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        # Ensure returned HDUList is associated with a file
        fitsobj.close()
        fitsobj = fits.open(newfilename, mode='update')
        newfiles.append(fitsobj)
    # Return HDUList, not filename
    if toclose:
        f.close()

    sptfilename = fileutil.buildNewRootname(stisfilename, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        return newfiles

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if (os.path.exists(newfilename)):
                    os.remove(newfilename)
                    print(" Replacing "+newfilename+"...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        sptfile.close()

    return newfiles
Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list with the names of the new flt files.
Below is the the instruction that describes the task:
### Input:
Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list with the names of the new flt files.

### Response:
def splitStis(stisfile, sci_count):
    """
    Split a STIS association file into multiple imset MEF files.

    Split the corresponding spt file if present into single spt files.
    If an spt file can't be split or is missing, a warning is printed.

    Returns
    -------
    names: list
        a list with the names of the new flt files.
    """
    newfiles = []
    toclose = False
    if isinstance(stisfile, str):
        f = fits.open(stisfile)
        toclose = True
    else:
        f = stisfile
    hdu0 = f[0].copy()
    stisfilename = f.filename()
    for count in range(1,sci_count+1):
        fitsobj = fits.HDUList()
        fitsobj.append(hdu0)
        hdu = f[('sci',count)].copy()
        fitsobj.append(hdu)

        rootname = hdu.header['EXPNAME']
        newfilename = fileutil.buildNewRootname(rootname, extn='_flt.fits')
        try:
            # Verify error array exists
            if f[('err', count)].data is None:
                raise ValueError
            # Verify dq array exists
            if f[('dq', count)].data is None:
                raise ValueError
            # Copy the err extension
            hdu = f[('err',count)].copy()
            fitsobj.append(hdu)
            # Copy the dq extension
            hdu = f[('dq',count)].copy()
            fitsobj.append(hdu)
            fitsobj[1].header['EXTVER'] = 1
            fitsobj[2].header['EXTVER'] = 1
            fitsobj[3].header['EXTVER'] = 1
        except ValueError:
            print('\nWarning:')
            print('Extension version %d of the input file %s does not' %(count, stisfile))
            print('contain all required image extensions. Each must contain')
            print('populated SCI, ERR and DQ arrays.')

            continue

        # Determine if the file you wish to create already exists on the disk.
        # If the file does exist, replace it.
        if (os.path.exists(newfilename)):
            os.remove(newfilename)
            print(" Replacing "+newfilename+"...")

        # Write out the new file
        fitsobj.writeto(newfilename)
        # Ensure returned HDUList is associated with a file
        fitsobj.close()
        fitsobj = fits.open(newfilename, mode='update')
        newfiles.append(fitsobj)
    # Return HDUList, not filename
    if toclose:
        f.close()

    sptfilename = fileutil.buildNewRootname(stisfilename, extn='_spt.fits')
    try:
        sptfile = fits.open(sptfilename)
    except IOError:
        print('SPT file not found %s \n' % sptfilename)
        return newfiles

    if sptfile:
        hdu0 = sptfile[0].copy()
        try:
            for count in range(1,sci_count+1):
                fitsobj = fits.HDUList()
                fitsobj.append(hdu0)
                hdu = sptfile[count].copy()
                fitsobj.append(hdu)
                rootname = hdu.header['EXPNAME']
                newfilename = fileutil.buildNewRootname(rootname, extn='_spt.fits')
                fitsobj[1].header['EXTVER'] = 1
                if (os.path.exists(newfilename)):
                    os.remove(newfilename)
                    print(" Replacing "+newfilename+"...")

                # Write out the new file
                fitsobj.writeto(newfilename)
        except Exception:
            print("Warning: Unable to split spt file %s " % sptfilename)
        sptfile.close()

    return newfiles
def add_data_attribute(self, data_attr):
    '''Interprets a DATA attribute and adds it to the datastream.'''
    if data_attr.header.attr_type_id is not AttrTypes.DATA:
        raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes")
    if data_attr.header.attr_name != self.name:
        raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be added to this stream")

    if data_attr.header.non_resident:
        nonr_header = data_attr.header
        if self._data_runs is None:
            self._data_runs = []
        if nonr_header.end_vcn > self.cluster_count:
            self.cluster_count = nonr_header.end_vcn
        if not nonr_header.start_vcn: #start_vcn == 0
            self.size = nonr_header.curr_sstream
            self.alloc_size = nonr_header.alloc_sstream
        self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs))
        self._data_runs_sorted = False
    else: #if it is resident
        self.size = self.alloc_size = data_attr.header.content_len
        self._pending_processing = None
        #respects mft_config["load_data"]
        self._content = data_attr.content.content
Interprets a DATA attribute and adds it to the datastream.
Below is the the instruction that describes the task:
### Input:
Interprets a DATA attribute and adds it to the datastream.

### Response:
def add_data_attribute(self, data_attr):
    '''Interprets a DATA attribute and adds it to the datastream.'''
    if data_attr.header.attr_type_id is not AttrTypes.DATA:
        raise DataStreamError("Invalid attribute. A Datastream deals only with DATA attributes")
    if data_attr.header.attr_name != self.name:
        raise DataStreamError(f"Data from a different stream '{data_attr.header.attr_name}' cannot be added to this stream")

    if data_attr.header.non_resident:
        nonr_header = data_attr.header
        if self._data_runs is None:
            self._data_runs = []
        if nonr_header.end_vcn > self.cluster_count:
            self.cluster_count = nonr_header.end_vcn
        if not nonr_header.start_vcn: #start_vcn == 0
            self.size = nonr_header.curr_sstream
            self.alloc_size = nonr_header.alloc_sstream
        self._data_runs.append((nonr_header.start_vcn, nonr_header.data_runs))
        self._data_runs_sorted = False
    else: #if it is resident
        self.size = self.alloc_size = data_attr.header.content_len
        self._pending_processing = None
        #respects mft_config["load_data"]
        self._content = data_attr.content.content
def search( self ):
    """
    Looks up the current search terms from the xdk files that are loaded.
    """
    QApplication.instance().setOverrideCursor(Qt.WaitCursor)

    terms = nativestring(self.uiSearchTXT.text())
    html = []
    entry_html = '<a href="%(url)s">%(title)s</a><br/>'\
                 '<small>%(url)s</small>'

    for i in range(self.uiContentsTREE.topLevelItemCount()):
        item = self.uiContentsTREE.topLevelItem(i)
        results = item.search(terms)
        results.sort(key=lambda x: x['strength'], reverse=True)
        for item in results:
            html.append( entry_html % item )

    if ( not html ):
        html.append('<b>No results were found for %s</b>' % terms)

    self.uiSearchWEB.setHtml(SEARCH_HTML % '<br/><br/>'.join(html))

    QApplication.instance().restoreOverrideCursor()
Looks up the current search terms from the xdk files that are loaded.
Below is the the instruction that describes the task:
### Input:
Looks up the current search terms from the xdk files that are loaded.

### Response:
def search( self ):
    """
    Looks up the current search terms from the xdk files that are loaded.
    """
    QApplication.instance().setOverrideCursor(Qt.WaitCursor)

    terms = nativestring(self.uiSearchTXT.text())
    html = []
    entry_html = '<a href="%(url)s">%(title)s</a><br/>'\
                 '<small>%(url)s</small>'

    for i in range(self.uiContentsTREE.topLevelItemCount()):
        item = self.uiContentsTREE.topLevelItem(i)
        results = item.search(terms)
        results.sort(key=lambda x: x['strength'], reverse=True)
        for item in results:
            html.append( entry_html % item )

    if ( not html ):
        html.append('<b>No results were found for %s</b>' % terms)

    self.uiSearchWEB.setHtml(SEARCH_HTML % '<br/><br/>'.join(html))

    QApplication.instance().restoreOverrideCursor()
def _bio_to_string(bio): """ Copy the contents of an OpenSSL BIO object into a Python byte string. """ result_buffer = _ffi.new('char**') buffer_length = _lib.BIO_get_mem_data(bio, result_buffer) return _ffi.buffer(result_buffer[0], buffer_length)[:]
Copy the contents of an OpenSSL BIO object into a Python byte string.
Below is the the instruction that describes the task: ### Input: Copy the contents of an OpenSSL BIO object into a Python byte string. ### Response: def _bio_to_string(bio): """ Copy the contents of an OpenSSL BIO object into a Python byte string. """ result_buffer = _ffi.new('char**') buffer_length = _lib.BIO_get_mem_data(bio, result_buffer) return _ffi.buffer(result_buffer[0], buffer_length)[:]
def set_centralized_assembled_rows_cols(self, irn, jcn): """Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based. """ if self.myid != 0: return assert irn.size == jcn.size self._refs.update(irn=irn, jcn=jcn) self.id.nz = irn.size self.id.irn = self.cast_array(irn) self.id.jcn = self.cast_array(jcn)
Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based.
Below is the the instruction that describes the task: ### Input: Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based. ### Response: def set_centralized_assembled_rows_cols(self, irn, jcn): """Set assembled matrix indices on processor 0. The row and column indices (irn & jcn) should be one based. """ if self.myid != 0: return assert irn.size == jcn.size self._refs.update(irn=irn, jcn=jcn) self.id.nz = irn.size self.id.irn = self.cast_array(irn) self.id.jcn = self.cast_array(jcn)
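The one-based requirement above is easy to trip over when converting from scipy's zero-based COO format. A hypothetical conversion sketch; `solver` stands in for whatever object owns the method:

import numpy as np
import scipy.sparse as sp

# Build a random sparse matrix and shift its zero-based COO indices to the
# one-based arrays the method expects on processor 0.
A = sp.random(5, 5, density=0.4, format='coo')
irn = A.row.astype(np.int32) + 1   # one-based row indices
jcn = A.col.astype(np.int32) + 1   # one-based column indices
# solver.set_centralized_assembled_rows_cols(irn, jcn)  # `solver` assumed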
def availability_set_create_or_update(name, resource_group, **kwargs): # pylint: disable=invalid-name ''' .. versionadded:: 2019.2.0 Create or update an availability set. :param name: The availability set to create. :param resource_group: The resource group name assigned to the availability set. CLI Example: .. code-block:: bash salt-call azurearm_compute.availability_set_create_or_update testset testgroup ''' if 'location' not in kwargs: rg_props = __salt__['azurearm_resource.resource_group_get']( resource_group, **kwargs ) if 'error' in rg_props: log.error( 'Unable to determine location from resource group specified.' ) return False kwargs['location'] = rg_props['location'] compconn = __utils__['azurearm.get_client']('compute', **kwargs) # Use VM names to link to the IDs of existing VMs. if isinstance(kwargs.get('virtual_machines'), list): vm_list = [] for vm_name in kwargs.get('virtual_machines'): vm_instance = __salt__['azurearm_compute.virtual_machine_get']( name=vm_name, resource_group=resource_group, **kwargs ) if 'error' not in vm_instance: vm_list.append({'id': str(vm_instance['id'])}) kwargs['virtual_machines'] = vm_list try: setmodel = __utils__['azurearm.create_object_model']('compute', 'AvailabilitySet', **kwargs) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: av_set = compconn.availability_sets.create_or_update( resource_group_name=resource_group, availability_set_name=name, parameters=setmodel ) result = av_set.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
.. versionadded:: 2019.2.0 Create or update an availability set. :param name: The availability set to create. :param resource_group: The resource group name assigned to the availability set. CLI Example: .. code-block:: bash salt-call azurearm_compute.availability_set_create_or_update testset testgroup
Below is the the instruction that describes the task: ### Input: .. versionadded:: 2019.2.0 Create or update an availability set. :param name: The availability set to create. :param resource_group: The resource group name assigned to the availability set. CLI Example: .. code-block:: bash salt-call azurearm_compute.availability_set_create_or_update testset testgroup ### Response: def availability_set_create_or_update(name, resource_group, **kwargs): # pylint: disable=invalid-name ''' .. versionadded:: 2019.2.0 Create or update an availability set. :param name: The availability set to create. :param resource_group: The resource group name assigned to the availability set. CLI Example: .. code-block:: bash salt-call azurearm_compute.availability_set_create_or_update testset testgroup ''' if 'location' not in kwargs: rg_props = __salt__['azurearm_resource.resource_group_get']( resource_group, **kwargs ) if 'error' in rg_props: log.error( 'Unable to determine location from resource group specified.' ) return False kwargs['location'] = rg_props['location'] compconn = __utils__['azurearm.get_client']('compute', **kwargs) # Use VM names to link to the IDs of existing VMs. if isinstance(kwargs.get('virtual_machines'), list): vm_list = [] for vm_name in kwargs.get('virtual_machines'): vm_instance = __salt__['azurearm_compute.virtual_machine_get']( name=vm_name, resource_group=resource_group, **kwargs ) if 'error' not in vm_instance: vm_list.append({'id': str(vm_instance['id'])}) kwargs['virtual_machines'] = vm_list try: setmodel = __utils__['azurearm.create_object_model']('compute', 'AvailabilitySet', **kwargs) except TypeError as exc: result = {'error': 'The object model could not be built. ({0})'.format(str(exc))} return result try: av_set = compconn.availability_sets.create_or_update( resource_group_name=resource_group, availability_set_name=name, parameters=setmodel ) result = av_set.as_dict() except CloudError as exc: __utils__['azurearm.log_cloud_error']('compute', str(exc), **kwargs) result = {'error': str(exc)} except SerializationError as exc: result = {'error': 'The object model could not be parsed. ({0})'.format(str(exc))} return result
def route(cls, path): """A decorator to indicate that a method should be a routable HTTP endpoint. .. code-block:: python from compactor.process import Process class WebProcess(Process): @Process.route('/hello/world') def hello_world(self, handler): return handler.write('<html><title>hello world</title></html>') The handler passed to the method is a tornado RequestHandler. WARNING: This interface is alpha and may change in the future if or when we remove tornado as a compactor dependency. :param path: The endpoint to route to this method. :type path: ``str`` """ if not path.startswith('/'): raise ValueError('Routes must start with "/"') def wrap(fn): setattr(fn, cls.ROUTE_ATTRIBUTE, path) return fn return wrap
A decorator to indicate that a method should be a routable HTTP endpoint. .. code-block:: python from compactor.process import Process class WebProcess(Process): @Process.route('/hello/world') def hello_world(self, handler): return handler.write('<html><title>hello world</title></html>') The handler passed to the method is a tornado RequestHandler. WARNING: This interface is alpha and may change in the future if or when we remove tornado as a compactor dependency. :param path: The endpoint to route to this method. :type path: ``str``
Below is the the instruction that describes the task: ### Input: A decorator to indicate that a method should be a routable HTTP endpoint. .. code-block:: python from compactor.process import Process class WebProcess(Process): @Process.route('/hello/world') def hello_world(self, handler): return handler.write('<html><title>hello world</title></html>') The handler passed to the method is a tornado RequestHandler. WARNING: This interface is alpha and may change in the future if or when we remove tornado as a compactor dependency. :param path: The endpoint to route to this method. :type path: ``str`` ### Response: def route(cls, path): """A decorator to indicate that a method should be a routable HTTP endpoint. .. code-block:: python from compactor.process import Process class WebProcess(Process): @Process.route('/hello/world') def hello_world(self, handler): return handler.write('<html><title>hello world</title></html>') The handler passed to the method is a tornado RequestHandler. WARNING: This interface is alpha and may change in the future if or when we remove tornado as a compactor dependency. :param path: The endpoint to route to this method. :type path: ``str`` """ if not path.startswith('/'): raise ValueError('Routes must start with "/"') def wrap(fn): setattr(fn, cls.ROUTE_ATTRIBUTE, path) return fn return wrap
def publish_active_scene(self, scene_id): """publish changed active scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id)) return self.sequence_number
publish changed active scene
Below is the the instruction that describes the task: ### Input: publish changed active scene ### Response: def publish_active_scene(self, scene_id): """publish changed active scene""" self.sequence_number += 1 self.publisher.send_multipart(msgs.MessageBuilder.scene_active(self.sequence_number, scene_id)) return self.sequence_number
def make_purge_data(parser): """ Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph data from', ) parser.set_defaults( func=purgedata, )
Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph
Below is the the instruction that describes the task: ### Input: Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph ### Response: def make_purge_data(parser): """ Purge (delete, destroy, discard, shred) any Ceph data from /var/lib/ceph """ parser.add_argument( 'host', metavar='HOST', nargs='+', help='hosts to purge Ceph data from', ) parser.set_defaults( func=purgedata, )
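A sketch of how a parser-builder like this is wired into an argparse subcommand; the `purgedata` callable is assumed to exist elsewhere in the module, as the snippet implies.

import argparse

parser = argparse.ArgumentParser(prog='ceph-deploy')
subparsers = parser.add_subparsers()
make_purge_data(subparsers.add_parser('purgedata'))

args = parser.parse_args(['purgedata', 'node1', 'node2'])
# args.host == ['node1', 'node2'] and args.func is the purgedata callable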
def sampleCellsWithinColumns(numCellPairs, cellsPerColumn, numColumns, seed=42): """ Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs """ np.random.seed(seed) cellPairs = [] for i in range(numCellPairs): randCol = np.random.randint(numColumns) randCells = np.random.choice(np.arange(cellsPerColumn), (2, ), replace=False) cellsPair = randCol * cellsPerColumn + randCells cellPairs.append(cellsPair) return cellPairs
Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs
Below is the the instruction that describes the task: ### Input: Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs ### Response: def sampleCellsWithinColumns(numCellPairs, cellsPerColumn, numColumns, seed=42): """ Generate indices of cell pairs, each pair of cells are from the same column @return cellPairs (list) list of cell pairs """ np.random.seed(seed) cellPairs = [] for i in range(numCellPairs): randCol = np.random.randint(numColumns) randCells = np.random.choice(np.arange(cellsPerColumn), (2, ), replace=False) cellsPair = randCol * cellsPerColumn + randCells cellPairs.append(cellsPair) return cellPairs
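A quick check of the sampler's output shape and its within-column invariant; the sizes below are arbitrary.

pairs = sampleCellsWithinColumns(numCellPairs=3, cellsPerColumn=4,
                                 numColumns=10, seed=42)
for a, b in pairs:
    assert a // 4 == b // 4   # both cells index into the same column
    assert a != b             # replace=False guarantees distinct cells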
def read_file(filename, prepend_paths=()):
    """
    Returns the contents of *filename* (UTF-8).

    If *prepend_paths* is set, join those before the *filename*. If it is
    `True`, prepend the path to `setup.py`.
    """
    if prepend_paths is True:
        prepend_paths = [
            os.path.abspath(os.path.dirname(__file__)),
        ]
    if prepend_paths:
        filename = os.path.join(*prepend_paths, filename)
    with open(filename, encoding='utf-8') as f:
        return f.read()
Returns the contents of *filename* (UTF-8).

    If *prepend_paths* is set, join those before the *filename*. If it is
    `True`, prepend the path to `setup.py`.
Below is the the instruction that describes the task:
### Input:
Returns the contents of *filename* (UTF-8).

    If *prepend_paths* is set, join those before the *filename*. If it is
    `True`, prepend the path to `setup.py`.

### Response:
def read_file(filename, prepend_paths=()):
    """
    Returns the contents of *filename* (UTF-8).

    If *prepend_paths* is set, join those before the *filename*. If it is
    `True`, prepend the path to `setup.py`.
    """
    if prepend_paths is True:
        prepend_paths = [
            os.path.abspath(os.path.dirname(__file__)),
        ]
    if prepend_paths:
        filename = os.path.join(*prepend_paths, filename)
    with open(filename, encoding='utf-8') as f:
        return f.read()
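Assuming this helper lives in setup.py, a typical call reads the long description relative to the project root regardless of the current working directory:

long_description = read_file('README.rst', prepend_paths=True)
# Explicit path components are joined in order before the filename:
changelog = read_file('CHANGES.rst', prepend_paths=['docs'])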
def delete_index(self): '''Deletes the underlying ES index. Only use this if you know what you're doing. This destroys the entire underlying ES index, which could be shared by multiple distinct ElasticStore instances. ''' if self.conn.indices.exists(index=self.index): self.conn.indices.delete(index=self.index)
Deletes the underlying ES index. Only use this if you know what you're doing. This destroys the entire underlying ES index, which could be shared by multiple distinct ElasticStore instances.
Below is the the instruction that describes the task: ### Input: Deletes the underlying ES index. Only use this if you know what you're doing. This destroys the entire underlying ES index, which could be shared by multiple distinct ElasticStore instances. ### Response: def delete_index(self): '''Deletes the underlying ES index. Only use this if you know what you're doing. This destroys the entire underlying ES index, which could be shared by multiple distinct ElasticStore instances. ''' if self.conn.indices.exists(index=self.index): self.conn.indices.delete(index=self.index)
def _open_for_csv(name, mode): """ Deal with Python 2/3 open API differences """ if sys.version_info[0] < 3: return open_rw(name, mode + 'b') return open_rw(name, mode, newline='', encoding='utf-8')
Deal with Python 2/3 open API differences
Below is the the instruction that describes the task: ### Input: Deal with Python 2/3 open API differences ### Response: def _open_for_csv(name, mode): """ Deal with Python 2/3 open API differences """ if sys.version_info[0] < 3: return open_rw(name, mode + 'b') return open_rw(name, mode, newline='', encoding='utf-8')
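A sketch of the intended call site, assuming `open_rw` returns a standard file object: the same code path yields a csv-safe handle on both interpreters ('wb' on Python 2, newline='' on Python 3).

import csv

with _open_for_csv('report.csv', 'w') as handle:
    writer = csv.writer(handle)
    writer.writerow(['name', 'count'])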
def ipv4_syntax_check(ip): # pragma: no cover """ Check the syntax of the given IPv4. :param ip: The IPv4 to check the syntax for. :type ip: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`ip` is given, we return :code:`None`. """ if ip and isinstance(ip, str): # The given IP is not empty nor None. # and # * The given IP is a string. # We silently load the configuration. load_config(True) return Check(ip).is_ip_valid() # We return None, there is nothing to check. return None
Check the syntax of the given IPv4. :param ip: The IPv4 to check the syntax for. :type ip: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`ip` is given, we return :code:`None`.
Below is the the instruction that describes the task: ### Input: Check the syntax of the given IPv4. :param ip: The IPv4 to check the syntax for. :type ip: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`ip` is given, we return :code:`None`. ### Response: def ipv4_syntax_check(ip): # pragma: no cover """ Check the syntax of the given IPv4. :param ip: The IPv4 to check the syntax for. :type ip: str :return: The syntax validity. :rtype: bool .. warning:: If an empty or a non-string :code:`ip` is given, we return :code:`None`. """ if ip and isinstance(ip, str): # The given IP is not empty nor None. # and # * The given IP is a string. # We silently load the configuration. load_config(True) return Check(ip).is_ip_valid() # We return None, there is nothing to check. return None
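Expected behaviour under the contract documented above, assuming Check.is_ip_valid rejects out-of-range octets; the inputs are illustrative.

assert ipv4_syntax_check('192.168.1.1') is True
assert ipv4_syntax_check('256.0.0.1') is False   # out-of-range octet
assert ipv4_syntax_check('') is None             # empty input short-circuits
assert ipv4_syntax_check(42) is None             # non-string input short-circuits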
def save_model(self, net): """Save the model. This function saves some or all of the following: - model parameters; - optimizer state; - training history; - entire model object. """ if self.f_params is not None: f = self._format_target(net, self.f_params, -1) self._save_params(f, net, "f_params", "model parameters") if self.f_optimizer is not None: f = self._format_target(net, self.f_optimizer, -1) self._save_params(f, net, "f_optimizer", "optimizer state") if self.f_history is not None: f = self.f_history_ self._save_params(f, net, "f_history", "history") if self.f_pickle: f_pickle = self._format_target(net, self.f_pickle, -1) with open_file_like(f_pickle, 'wb') as f: pickle.dump(net, f)
Save the model. This function saves some or all of the following: - model parameters; - optimizer state; - training history; - entire model object.
Below is the the instruction that describes the task: ### Input: Save the model. This function saves some or all of the following: - model parameters; - optimizer state; - training history; - entire model object. ### Response: def save_model(self, net): """Save the model. This function saves some or all of the following: - model parameters; - optimizer state; - training history; - entire model object. """ if self.f_params is not None: f = self._format_target(net, self.f_params, -1) self._save_params(f, net, "f_params", "model parameters") if self.f_optimizer is not None: f = self._format_target(net, self.f_optimizer, -1) self._save_params(f, net, "f_optimizer", "optimizer state") if self.f_history is not None: f = self.f_history_ self._save_params(f, net, "f_history", "history") if self.f_pickle: f_pickle = self._format_target(net, self.f_pickle, -1) with open_file_like(f_pickle, 'wb') as f: pickle.dump(net, f)
def get_keypair_dict(): """Returns dictionary of {keypairname: keypair}""" client = get_ec2_client() response = client.describe_key_pairs() assert is_good_response(response) result = {} ec2 = get_ec2_resource() for keypair in response['KeyPairs']: keypair_name = keypair.get('KeyName', '') if keypair_name in result: util.log(f"Warning: Duplicate key {keypair_name}") if DUPLICATE_CHECKING: assert keypair_name not in result, "Duplicate key " + keypair_name result[keypair_name] = ec2.KeyPair(keypair_name) return result
Returns dictionary of {keypairname: keypair}
Below is the the instruction that describes the task: ### Input: Returns dictionary of {keypairname: keypair} ### Response: def get_keypair_dict(): """Returns dictionary of {keypairname: keypair}""" client = get_ec2_client() response = client.describe_key_pairs() assert is_good_response(response) result = {} ec2 = get_ec2_resource() for keypair in response['KeyPairs']: keypair_name = keypair.get('KeyName', '') if keypair_name in result: util.log(f"Warning: Duplicate key {keypair_name}") if DUPLICATE_CHECKING: assert keypair_name not in result, "Duplicate key " + keypair_name result[keypair_name] = ec2.KeyPair(keypair_name) return result
def read(self, size=-1): """Read bytes and call the callback""" bites = self.file.read(size) self.bytes_read += len(bites) self.callback(len(bites), self.bytes_read) return bites
Read bytes and call the callback
Below is the the instruction that describes the task: ### Input: Read bytes and call the callback ### Response: def read(self, size=-1): """Read bytes and call the callback""" bites = self.file.read(size) self.bytes_read += len(bites) self.callback(len(bites), self.bytes_read) return bites
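A minimal sketch of this progress-reporting wrapper in use; `ProgressFile` is an assumed name for the class the method belongs to, taking a file object and a callback.

import sys

def report(chunk_size, total_read):
    sys.stdout.write('\rread %d bytes' % total_read)

with open('payload.bin', 'rb') as raw:
    reader = ProgressFile(raw, report)   # hypothetical constructor
    while reader.read(64 * 1024):
        pass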
def get(obj, path, view=False, afilter=None):
    """Get the value of the given path.

    Arguments:

    obj -- Object to look in.
    path -- A list of (key, type) pairs representing the path.

    Keyword Arguments:

    view -- Return a view of the object.
    afilter -- Optional filter applied to leaf values; a leaf that fails
    the filter raises dpath.exceptions.FilteredValue.
    """
    index = 0
    path_count = len(path) - 1
    target = obj
    head = type(target)()
    tail = head
    up = None
    for pair in path:
        key = pair[0]
        target = target[key]
        if view:
            if isinstance(tail, MutableMapping):
                if issubclass(pair[1], (MutableSequence, MutableMapping)) and index != path_count:
                    tail[key] = pair[1]()
                else:
                    tail[key] = target
                up = tail
                tail = tail[key]
            elif issubclass(tail.__class__, MutableSequence):
                if issubclass(pair[1], (MutableSequence, MutableMapping)) and index != path_count:
                    tail.append(pair[1]())
                else:
                    tail.append(target)
                up = tail
                tail = tail[-1]
        if not issubclass(target.__class__, (MutableSequence, MutableMapping)):
            if (afilter and (not afilter(target))):
                raise dpath.exceptions.FilteredValue
        index += 1

    if view:
        return head
    else:
        return target
Get the value of the given path.

    Arguments:

    obj -- Object to look in.
    path -- A list of (key, type) pairs representing the path.

    Keyword Arguments:

    view -- Return a view of the object.
    afilter -- Optional filter applied to leaf values; a leaf that fails
    the filter raises dpath.exceptions.FilteredValue.
Below is the the instruction that describes the task:
### Input:
Get the value of the given path.

    Arguments:

    obj -- Object to look in.
    path -- A list of (key, type) pairs representing the path.

    Keyword Arguments:

    view -- Return a view of the object.
    afilter -- Optional filter applied to leaf values; a leaf that fails
    the filter raises dpath.exceptions.FilteredValue.

### Response:
def get(obj, path, view=False, afilter=None):
    """Get the value of the given path.

    Arguments:

    obj -- Object to look in.
    path -- A list of (key, type) pairs representing the path.

    Keyword Arguments:

    view -- Return a view of the object.
    afilter -- Optional filter applied to leaf values; a leaf that fails
    the filter raises dpath.exceptions.FilteredValue.
    """
    index = 0
    path_count = len(path) - 1
    target = obj
    head = type(target)()
    tail = head
    up = None
    for pair in path:
        key = pair[0]
        target = target[key]
        if view:
            if isinstance(tail, MutableMapping):
                if issubclass(pair[1], (MutableSequence, MutableMapping)) and index != path_count:
                    tail[key] = pair[1]()
                else:
                    tail[key] = target
                up = tail
                tail = tail[key]
            elif issubclass(tail.__class__, MutableSequence):
                if issubclass(pair[1], (MutableSequence, MutableMapping)) and index != path_count:
                    tail.append(pair[1]())
                else:
                    tail.append(target)
                up = tail
                tail = tail[-1]
        if not issubclass(target.__class__, (MutableSequence, MutableMapping)):
            if (afilter and (not afilter(target))):
                raise dpath.exceptions.FilteredValue
        index += 1

    if view:
        return head
    else:
        return target
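The path format is unusual: each element pairs a key with the type of the container or leaf it selects. A hand-built example:

obj = {'a': [{'b': 1}]}
path = [('a', list), (0, dict), ('b', int)]
get(obj, path)             # -> 1
get(obj, path, view=True)  # -> {'a': [{'b': 1}]}, rebuilt along the path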
def is_numerical(tg_type, inc_array=False):
    """Tells if the given tango type is numerical

    :param tg_type: tango type
    :type tg_type: :class:`tango.CmdArgType`
    :param inc_array: (optional, default is False) determines whether array
                      types are included in the check
    :type inc_array: :py:obj:`bool`

    :return: True if the given tango type is numerical, False otherwise
    :rtype: :py:obj:`bool`
    """
    global _scalar_numerical_types, _array_numerical_types
    if tg_type in _scalar_numerical_types:
        return True
    if not inc_array:
        return False
    return tg_type in _array_numerical_types
Tells if the given tango type is numerical

    :param tg_type: tango type
    :type tg_type: :class:`tango.CmdArgType`
    :param inc_array: (optional, default is False) determines whether array
                      types are included in the check
    :type inc_array: :py:obj:`bool`

    :return: True if the given tango type is numerical, False otherwise
    :rtype: :py:obj:`bool`
Below is the the instruction that describes the task:
### Input:
Tells if the given tango type is numerical

    :param tg_type: tango type
    :type tg_type: :class:`tango.CmdArgType`
    :param inc_array: (optional, default is False) determines whether array
                      types are included in the check
    :type inc_array: :py:obj:`bool`

    :return: True if the given tango type is numerical, False otherwise
    :rtype: :py:obj:`bool`

### Response:
def is_numerical(tg_type, inc_array=False):
    """Tells if the given tango type is numerical

    :param tg_type: tango type
    :type tg_type: :class:`tango.CmdArgType`
    :param inc_array: (optional, default is False) determines whether array
                      types are included in the check
    :type inc_array: :py:obj:`bool`

    :return: True if the given tango type is numerical, False otherwise
    :rtype: :py:obj:`bool`
    """
    global _scalar_numerical_types, _array_numerical_types
    if tg_type in _scalar_numerical_types:
        return True
    if not inc_array:
        return False
    return tg_type in _array_numerical_types
def _kalman_prediction_step(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
                            p_dm = None, p_dP = None):
    """
    Discrete prediction function

    Input:
        k:int
            Iteration No. Starts at 0. Total number of iterations equals the
            number of measurements.

        p_m: matrix of size (state_dim, time_series_no)
            Mean value from the previous step. For "multiple time series mode"
            it is matrix, second dimension of which correspond to different
            time series.

        p_P:
            Covariance matrix from the previous step.

        p_dyn_model_callable: class

        calc_grad_log_likelihood: boolean
            Whether to calculate gradient of the marginal likelihood
            of the state-space model. If true then the next parameter must
            provide the extra parameters for gradient calculation.

        p_dm: 3D array (state_dim, time_series_no, parameters_no)
            Mean derivatives from the previous step. For "multiple time series
            mode" it is 3D array, second dimension of which correspond to
            different time series.

        p_dP: 3D array (state_dim, state_dim, parameters_no)
            Covariance matrix derivatives from the previous step

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
        Results of the prediction step.
    """
    # index correspond to values from previous iteration.
    A = p_dyn_model_callable.Ak(k,p_m,p_P) # state transition matrix (or Jacobian)
    Q = p_dyn_model_callable.Qk(k) # state noise matrix

    # Prediction step ->
    m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
    P_pred = A.dot(p_P).dot(A.T) + Q # predicted variance
    # Prediction step <-

    if calc_grad_log_likelihood:
        dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
        dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters

        param_number = p_dP.shape[2]

        # p_dm, p_dP - derivatives from the previous step
        dm_pred = np.empty(p_dm.shape)
        dP_pred = np.empty(p_dP.shape)

        for j in range(param_number):
            dA = dA_all_params[:,:,j]
            dQ = dQ_all_params[:,:,j]

            dP = p_dP[:,:,j]
            dm = p_dm[:,:,j]

            dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, dm)
            # prediction step derivatives for current parameter:

            dP_pred[:,:,j] = np.dot( dA ,np.dot(p_P, A.T))
            dP_pred[:,:,j] += dP_pred[:,:,j].T
            dP_pred[:,:,j] += np.dot( A ,np.dot(dP, A.T)) + dQ

            dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
    else:
        dm_pred = None
        dP_pred = None

    return m_pred, P_pred, dm_pred, dP_pred
Discrete prediction function

    Input:
        k:int
            Iteration No. Starts at 0. Total number of iterations equals the
            number of measurements.

        p_m: matrix of size (state_dim, time_series_no)
            Mean value from the previous step. For "multiple time series mode"
            it is matrix, second dimension of which correspond to different
            time series.

        p_P:
            Covariance matrix from the previous step.

        p_dyn_model_callable: class

        calc_grad_log_likelihood: boolean
            Whether to calculate gradient of the marginal likelihood
            of the state-space model. If true then the next parameter must
            provide the extra parameters for gradient calculation.

        p_dm: 3D array (state_dim, time_series_no, parameters_no)
            Mean derivatives from the previous step. For "multiple time series
            mode" it is 3D array, second dimension of which correspond to
            different time series.

        p_dP: 3D array (state_dim, state_dim, parameters_no)
            Covariance matrix derivatives from the previous step

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
        Results of the prediction step.
Below is the the instruction that describes the task:
### Input:
Discrete prediction function

    Input:
        k:int
            Iteration No. Starts at 0. Total number of iterations equals the
            number of measurements.

        p_m: matrix of size (state_dim, time_series_no)
            Mean value from the previous step. For "multiple time series mode"
            it is matrix, second dimension of which correspond to different
            time series.

        p_P:
            Covariance matrix from the previous step.

        p_dyn_model_callable: class

        calc_grad_log_likelihood: boolean
            Whether to calculate gradient of the marginal likelihood
            of the state-space model. If true then the next parameter must
            provide the extra parameters for gradient calculation.

        p_dm: 3D array (state_dim, time_series_no, parameters_no)
            Mean derivatives from the previous step. For "multiple time series
            mode" it is 3D array, second dimension of which correspond to
            different time series.

        p_dP: 3D array (state_dim, state_dim, parameters_no)
            Covariance matrix derivatives from the previous step

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
        Results of the prediction step.

### Response:
def _kalman_prediction_step(k, p_m , p_P, p_dyn_model_callable, calc_grad_log_likelihood=False,
                            p_dm = None, p_dP = None):
    """
    Discrete prediction function

    Input:
        k:int
            Iteration No. Starts at 0. Total number of iterations equals the
            number of measurements.

        p_m: matrix of size (state_dim, time_series_no)
            Mean value from the previous step. For "multiple time series mode"
            it is matrix, second dimension of which correspond to different
            time series.

        p_P:
            Covariance matrix from the previous step.

        p_dyn_model_callable: class

        calc_grad_log_likelihood: boolean
            Whether to calculate gradient of the marginal likelihood
            of the state-space model. If true then the next parameter must
            provide the extra parameters for gradient calculation.

        p_dm: 3D array (state_dim, time_series_no, parameters_no)
            Mean derivatives from the previous step. For "multiple time series
            mode" it is 3D array, second dimension of which correspond to
            different time series.

        p_dP: 3D array (state_dim, state_dim, parameters_no)
            Covariance matrix derivatives from the previous step

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D arrays
        Results of the prediction step.
    """
    # index correspond to values from previous iteration.
    A = p_dyn_model_callable.Ak(k,p_m,p_P) # state transition matrix (or Jacobian)
    Q = p_dyn_model_callable.Qk(k) # state noise matrix

    # Prediction step ->
    m_pred = p_dyn_model_callable.f_a(k, p_m, A) # predicted mean
    P_pred = A.dot(p_P).dot(A.T) + Q # predicted variance
    # Prediction step <-

    if calc_grad_log_likelihood:
        dA_all_params = p_dyn_model_callable.dAk(k) # derivatives of A wrt parameters
        dQ_all_params = p_dyn_model_callable.dQk(k) # derivatives of Q wrt parameters

        param_number = p_dP.shape[2]

        # p_dm, p_dP - derivatives from the previous step
        dm_pred = np.empty(p_dm.shape)
        dP_pred = np.empty(p_dP.shape)

        for j in range(param_number):
            dA = dA_all_params[:,:,j]
            dQ = dQ_all_params[:,:,j]

            dP = p_dP[:,:,j]
            dm = p_dm[:,:,j]

            dm_pred[:,:,j] = np.dot(dA, p_m) + np.dot(A, dm)
            # prediction step derivatives for current parameter:

            dP_pred[:,:,j] = np.dot( dA ,np.dot(p_P, A.T))
            dP_pred[:,:,j] += dP_pred[:,:,j].T
            dP_pred[:,:,j] += np.dot( A ,np.dot(dP, A.T)) + dQ

            dP_pred[:,:,j] = 0.5*(dP_pred[:,:,j] + dP_pred[:,:,j].T) #symmetrize
    else:
        dm_pred = None
        dP_pred = None

    return m_pred, P_pred, dm_pred, dP_pred
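For reference, in the linear case the step above computes the standard Kalman prediction and its parameter sensitivities, matching the loop body:

m_k^- = A_k m_{k-1}, \qquad P_k^- = A_k P_{k-1} A_k^\top + Q_k

\frac{\partial m_k^-}{\partial \theta_j} = \frac{\partial A_k}{\partial \theta_j} m_{k-1} + A_k \frac{\partial m_{k-1}}{\partial \theta_j}

\frac{\partial P_k^-}{\partial \theta_j} = \frac{\partial A_k}{\partial \theta_j} P_{k-1} A_k^\top + A_k P_{k-1} \frac{\partial A_k^\top}{\partial \theta_j} + A_k \frac{\partial P_{k-1}}{\partial \theta_j} A_k^\top + \frac{\partial Q_k}{\partial \theta_j}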
def _multiplexed_response_stream_helper(self, response): """A generator of multiplexed data blocks coming from a response stream.""" # Disable timeout on the underlying socket to prevent # Read timed out(s) for long running processes socket = self._get_raw_response_socket(response) self._disable_socket_timeout(socket) while True: header = response.raw.read(STREAM_HEADER_SIZE_BYTES) if not header: break _, length = struct.unpack('>BxxxL', header) if not length: continue data = response.raw.read(length) if not data: break yield data
A generator of multiplexed data blocks coming from a response stream.
Below is the the instruction that describes the task: ### Input: A generator of multiplexed data blocks coming from a response stream. ### Response: def _multiplexed_response_stream_helper(self, response): """A generator of multiplexed data blocks coming from a response stream.""" # Disable timeout on the underlying socket to prevent # Read timed out(s) for long running processes socket = self._get_raw_response_socket(response) self._disable_socket_timeout(socket) while True: header = response.raw.read(STREAM_HEADER_SIZE_BYTES) if not header: break _, length = struct.unpack('>BxxxL', header) if not length: continue data = response.raw.read(length) if not data: break yield data
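The 8-byte multiplexed-stream header decoded above (docker's STREAM_HEADER_SIZE_BYTES is 8) packs one byte for the stream id, three pad bytes, then a big-endian uint32 payload length. A synthetic frame for illustration:

import struct

payload = b'hello\n'
frame = struct.pack('>BxxxL', 1, len(payload)) + payload
stream_id, length = struct.unpack('>BxxxL', frame[:8])
assert (stream_id, frame[8:8 + length]) == (1, payload)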
def concatenate_fastas(output_fna_clustered, output_fna_failures,
                       output_concat_filepath):
    """ Concatenates two input fastas, writes to output_concat_filepath

    output_fna_clustered: fasta of successful ref clusters
    output_fna_failures: de novo fasta of cluster failures
    output_concat_filepath: path to write combined fastas to
    """

    output_fp = open(output_concat_filepath, "w")
    for label, seq in parse_fasta(open(output_fna_clustered, "U")):
        output_fp.write(">%s\n%s\n" % (label, seq))

    for label, seq in parse_fasta(open(output_fna_failures, "U")):
        output_fp.write(">%s\n%s\n" % (label, seq))

    output_fp.close()

    return output_concat_filepath
Concatenates two input fastas, writes to output_concat_filepath output_fna_clustered: fasta of successful ref clusters output_fna_failures: de novo fasta of cluster failures output_concat_filepath: path to write combined fastas to
Below is the the instruction that describes the task:
### Input:
Concatenates two input fastas, writes to output_concat_filepath

    output_fna_clustered: fasta of successful ref clusters
    output_fna_failures: de novo fasta of cluster failures
    output_concat_filepath: path to write combined fastas to

### Response:
def concatenate_fastas(output_fna_clustered, output_fna_failures,
                       output_concat_filepath):
    """ Concatenates two input fastas, writes to output_concat_filepath

    output_fna_clustered: fasta of successful ref clusters
    output_fna_failures: de novo fasta of cluster failures
    output_concat_filepath: path to write combined fastas to
    """

    output_fp = open(output_concat_filepath, "w")
    for label, seq in parse_fasta(open(output_fna_clustered, "U")):
        output_fp.write(">%s\n%s\n" % (label, seq))

    for label, seq in parse_fasta(open(output_fna_failures, "U")):
        output_fp.write(">%s\n%s\n" % (label, seq))

    output_fp.close()

    return output_concat_filepath
def readlen(args):
    """
    %prog readlen fastqfile

    Calculate read length, will only try the first N reads. Output min, max, and
    avg for each file.
    """
    p = OptionParser(readlen.__doc__)
    p.set_firstN()
    p.add_option("--silent", default=False, action="store_true",
                 help="Do not print read length stats")
    p.add_option("--nocheck", default=False, action="store_true",
                 help="Do not check file type suffix")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    f, = args
    if (not opts.nocheck) and (not is_fastq(f)):
        logging.debug("File `{}` does not end with .fastq or .fq".format(f))
        return 0

    s = calc_readlen(f, opts.firstN)
    if not opts.silent:
        print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))

    return int(s.max)
%prog readlen fastqfile Calculate read length, will only try the first N reads. Output min, max, and avg for each file.
Below is the the instruction that describes the task:
### Input:
%prog readlen fastqfile

    Calculate read length, will only try the first N reads. Output min, max, and
    avg for each file.

### Response:
def readlen(args):
    """
    %prog readlen fastqfile

    Calculate read length, will only try the first N reads. Output min, max, and
    avg for each file.
    """
    p = OptionParser(readlen.__doc__)
    p.set_firstN()
    p.add_option("--silent", default=False, action="store_true",
                 help="Do not print read length stats")
    p.add_option("--nocheck", default=False, action="store_true",
                 help="Do not check file type suffix")
    opts, args = p.parse_args(args)

    if len(args) != 1:
        sys.exit(not p.print_help())

    f, = args
    if (not opts.nocheck) and (not is_fastq(f)):
        logging.debug("File `{}` does not end with .fastq or .fq".format(f))
        return 0

    s = calc_readlen(f, opts.firstN)
    if not opts.silent:
        print("\t".join(str(x) for x in (f, s.min, s.max, s.mean, s.median)))

    return int(s.max)
def subject(name, meta_data=None, check_path=True):
    '''
    subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects
    are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable
    subject object.

    Note that subjects returned by freesurfer_subject() are always persistent Immutable objects;
    this means that you must create a transient version of the subject to modify it via the member
    function sub.transient(). Better, you can make copies of the objects with desired modifications
    using the copy method--see the pimms library documentation regarding immutable classes and
    objects.

    The following options are accepted:
      * meta_data (default: None) may optionally be a map that contains meta-data to be passed
        along to the subject object (note that this meta-data will not be cached).
      * check_path (default: True) may optionally be set to False to ignore the requirement that
        a directory contain at least the mri/, label/, and surf/ directories to be considered a
        valid FreeSurfer subject directory. Subject objects returned using this argument are not
        cached. Additionally, check_path may be set to None instead of False, indicating that no
        checks or search should be performed; the string name should be trusted to be an exact
        relative or absolute path to a valid FreeSurfer subject.
    '''
    name = os.path.expanduser(os.path.expandvars(name))
    if check_path is None:
        sub = Subject(name, check_path=False)
        if isinstance(sub, Subject): sub.persist()
    else:
        subpath = find_subject_path(name, check_path=check_path)
        if subpath is None and name == 'fsaverage':
            # we can use the benson and winawer 2018 dataset
            import neuropythy as ny
            try: return ny.data['benson_winawer_2018'].subjects['fsaverage']
            except Exception: pass # error message below is more accurate...
        if subpath is None: raise ValueError('Could not locate subject with name \'%s\'' % name)
        elif check_path:
            fpath = '/' + os.path.relpath(subpath, '/')
            if fpath in subject._cache: sub = subject._cache[fpath]
            else:
                sub = Subject(subpath)
                if isinstance(sub, Subject): subject._cache[fpath] = sub.persist()
        else:
            sub = Subject(subpath, check_path=False)
            if isinstance(sub, Subject): sub.persist()
    return (None if sub is None else
            sub.with_meta(meta_data) if meta_data is not None else
            sub)
subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects
    are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable
    subject object.

    Note that subjects returned by freesurfer_subject() are always persistent Immutable objects;
    this means that you must create a transient version of the subject to modify it via the member
    function sub.transient(). Better, you can make copies of the objects with desired modifications
    using the copy method--see the pimms library documentation regarding immutable classes and
    objects.

    The following options are accepted:
      * meta_data (default: None) may optionally be a map that contains meta-data to be passed
        along to the subject object (note that this meta-data will not be cached).
      * check_path (default: True) may optionally be set to False to ignore the requirement that
        a directory contain at least the mri/, label/, and surf/ directories to be considered a
        valid FreeSurfer subject directory. Subject objects returned using this argument are not
        cached. Additionally, check_path may be set to None instead of False, indicating that no
        checks or search should be performed; the string name should be trusted to be an exact
        relative or absolute path to a valid FreeSurfer subject.
Below is the the instruction that describes the task:
### Input:
subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects
    are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable
    subject object.

    Note that subjects returned by freesurfer_subject() are always persistent Immutable objects;
    this means that you must create a transient version of the subject to modify it via the member
    function sub.transient(). Better, you can make copies of the objects with desired modifications
    using the copy method--see the pimms library documentation regarding immutable classes and
    objects.

    The following options are accepted:
      * meta_data (default: None) may optionally be a map that contains meta-data to be passed
        along to the subject object (note that this meta-data will not be cached).
      * check_path (default: True) may optionally be set to False to ignore the requirement that
        a directory contain at least the mri/, label/, and surf/ directories to be considered a
        valid FreeSurfer subject directory. Subject objects returned using this argument are not
        cached. Additionally, check_path may be set to None instead of False, indicating that no
        checks or search should be performed; the string name should be trusted to be an exact
        relative or absolute path to a valid FreeSurfer subject.

### Response:
def subject(name, meta_data=None, check_path=True):
    '''
    subject(name) yields a freesurfer Subject object for the subject with the given name. Subjects
    are cached and not reloaded, so multiple calls to subject(name) will yield the same immutable
    subject object.

    Note that subjects returned by freesurfer_subject() are always persistent Immutable objects;
    this means that you must create a transient version of the subject to modify it via the member
    function sub.transient(). Better, you can make copies of the objects with desired modifications
    using the copy method--see the pimms library documentation regarding immutable classes and
    objects.

    The following options are accepted:
      * meta_data (default: None) may optionally be a map that contains meta-data to be passed
        along to the subject object (note that this meta-data will not be cached).
      * check_path (default: True) may optionally be set to False to ignore the requirement that
        a directory contain at least the mri/, label/, and surf/ directories to be considered a
        valid FreeSurfer subject directory. Subject objects returned using this argument are not
        cached. Additionally, check_path may be set to None instead of False, indicating that no
        checks or search should be performed; the string name should be trusted to be an exact
        relative or absolute path to a valid FreeSurfer subject.
    '''
    name = os.path.expanduser(os.path.expandvars(name))
    if check_path is None:
        sub = Subject(name, check_path=False)
        if isinstance(sub, Subject): sub.persist()
    else:
        subpath = find_subject_path(name, check_path=check_path)
        if subpath is None and name == 'fsaverage':
            # we can use the benson and winawer 2018 dataset
            import neuropythy as ny
            try: return ny.data['benson_winawer_2018'].subjects['fsaverage']
            except Exception: pass # error message below is more accurate...
        if subpath is None: raise ValueError('Could not locate subject with name \'%s\'' % name)
        elif check_path:
            fpath = '/' + os.path.relpath(subpath, '/')
            if fpath in subject._cache: sub = subject._cache[fpath]
            else:
                sub = Subject(subpath)
                if isinstance(sub, Subject): subject._cache[fpath] = sub.persist()
        else:
            sub = Subject(subpath, check_path=False)
            if isinstance(sub, Subject): sub.persist()
    return (None if sub is None else
            sub.with_meta(meta_data) if meta_data is not None else
            sub)
def fetchmany(self, size=None): """ Fetches a batch of rows in the active result set generated with ``execute()`` or ``executemany()``. :param size: Controls how many rows are returned. The default ``None`` means that the value of Cursor.arraysize is used. :return: A list of rows """ if size is None: size = self.arraysize if (size <= 0): raise InterfaceError("Invalid arraysize {} for fetchmany()".format(size)) return [row for row in islice(self, size)]
Fetches a batch of rows in the active result set generated with ``execute()`` or ``executemany()``. :param size: Controls how many rows are returned. The default ``None`` means that the value of Cursor.arraysize is used. :return: A list of rows
Below is the the instruction that describes the task: ### Input: Fetches a batch of rows in the active result set generated with ``execute()`` or ``executemany()``. :param size: Controls how many rows are returned. The default ``None`` means that the value of Cursor.arraysize is used. :return: A list of rows ### Response: def fetchmany(self, size=None): """ Fetches a batch of rows in the active result set generated with ``execute()`` or ``executemany()``. :param size: Controls how many rows are returned. The default ``None`` means that the value of Cursor.arraysize is used. :return: A list of rows """ if size is None: size = self.arraysize if (size <= 0): raise InterfaceError("Invalid arraysize {} for fetchmany()".format(size)) return [row for row in islice(self, size)]
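Typical batched consumption of this method; `cursor` is an assumed open cursor on which a statement has been executed.

cursor.execute('SELECT id, name FROM users')
while True:
    batch = cursor.fetchmany(500)
    if not batch:
        break
    for row in batch:
        print(row)   # application logic goes here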
def bind(self, extension: Extension) -> 'DictMentor': """ Add any predefined or custom extension. Args: extension: Extension to add to the processor. Returns: The DictMentor itself for chaining. """ if not Extension.is_valid_extension(extension): raise ValueError("Cannot bind extension due to missing interface requirements") self._extensions.append(extension) return self
Add any predefined or custom extension. Args: extension: Extension to add to the processor. Returns: The DictMentor itself for chaining.
Below is the the instruction that describes the task: ### Input: Add any predefined or custom extension. Args: extension: Extension to add to the processor. Returns: The DictMentor itself for chaining. ### Response: def bind(self, extension: Extension) -> 'DictMentor': """ Add any predefined or custom extension. Args: extension: Extension to add to the processor. Returns: The DictMentor itself for chaining. """ if not Extension.is_valid_extension(extension): raise ValueError("Cannot bind extension due to missing interface requirements") self._extensions.append(extension) return self
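Since bind() returns the DictMentor itself, calls chain naturally. A hedged sketch; MyExtension stands in for any concrete subclass of the base Extension interface, which is defined elsewhere:

mentor = DictMentor()
mentor.bind(MyExtension()).bind(MyExtension())   # MyExtension: hypothetical Extension subclass
# anything that fails Extension.is_valid_extension() raises:
# mentor.bind(object())  ->  ValueError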
def list_extensions(request): """List neutron extensions. :param request: django request object """ neutron_api = neutronclient(request) try: extensions_list = neutron_api.list_extensions() except exceptions.ServiceCatalogException: return {} if 'extensions' in extensions_list: return tuple(extensions_list['extensions']) else: return ()
List neutron extensions. :param request: django request object
Below is the the instruction that describes the task: ### Input: List neutron extensions. :param request: django request object ### Response: def list_extensions(request): """List neutron extensions. :param request: django request object """ neutron_api = neutronclient(request) try: extensions_list = neutron_api.list_extensions() except exceptions.ServiceCatalogException: return {} if 'extensions' in extensions_list: return tuple(extensions_list['extensions']) else: return ()
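A hedged sketch of a typical caller — testing whether a given Neutron extension alias is enabled (the 'router' alias is illustrative; Neutron extension entries are dicts with an 'alias' key):

extensions = list_extensions(request)
router_supported = any(ext.get('alias') == 'router' for ext in extensions)
if not router_supported:
    pass  # e.g. hide router-related panels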
def checkNGOODPIX(filelist):
    """
    Only for ACS, WFC3 and STIS, check NGOODPIX
    If all pixels are 'bad' on all chips, exclude this image from
    further processing.
    Similar checks that compare 'driz_sep_bits' against WFPC2 c1f.fits
    arrays and NICMOS DQ arrays will need to be done
    separately (and later).
    """
    removed_files = []
    supported_instruments = ['ACS','STIS','WFC3']
    for inputfile in filelist:
        # track per-file whether we opened the HDUList ourselves
        toclose = False
        if isinstance(inputfile, str):
            # skip unsupported instruments instead of falling through
            # with a bare filename string
            if fileutil.getKeyword(inputfile,'instrume') not in supported_instruments:
                continue
            inputfile = fits.open(inputfile)
            toclose = True
        elif inputfile[0].header['instrume'] not in supported_instruments:
            continue

        ngood = 0
        for extn in inputfile:
            if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI':
                ngood += extn.header['NGOODPIX']
        if (ngood == 0):
            removed_files.append(inputfile)
        if toclose:
            inputfile.close()

    if removed_files != []:
        print("Warning: Files without valid pixels detected: keyword NGOODPIX = 0.0")
        print("Warning: Removing the following files from input list")
        for f in removed_files:
            print('\t',f.filename() or "")
    return removed_files
Only for ACS, WFC3 and STIS, check NGOODPIX If all pixels are 'bad' on all chips, exclude this image from further processing. Similar checks that compare 'driz_sep_bits' against WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be done separately (and later).
Below is the the instruction that describes the task: ### Input: Only for ACS, WFC3 and STIS, check NGOODPIX If all pixels are 'bad' on all chips, exclude this image from further processing. Similar checks requiring comparing 'driz_sep_bits' against WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be done separately (and later). ### Response: def checkNGOODPIX(filelist): """ Only for ACS, WFC3 and STIS, check NGOODPIX If all pixels are 'bad' on all chips, exclude this image from further processing. Similar checks requiring comparing 'driz_sep_bits' against WFPC2 c1f.fits arrays and NICMOS DQ arrays will need to be done separately (and later). """ toclose = False removed_files = [] supported_instruments = ['ACS','STIS','WFC3'] for inputfile in filelist: if isinstance(inputfile, str): if fileutil.getKeyword(inputfile,'instrume') in supported_instruments: inputfile = fits.open(inputfile) toclose = True elif inputfile[0].header['instrume'] not in supported_instruments: continue ngood = 0 for extn in inputfile: if 'EXTNAME' in extn.header and extn.header['EXTNAME'] == 'SCI': ngood += extn.header['NGOODPIX'] if (ngood == 0): removed_files.append(inputfile) if toclose: inputfile.close() if removed_files != []: print("Warning: Files without valid pixels detected: keyword NGOODPIX = 0.0") print("Warning: Removing the following files from input list") for f in removed_files: print('\t',f.filename() or "") return removed_files
def Description(self): """Returns searchable data as Description""" descr = " ".join((self.getId(), self.aq_parent.Title())) return safe_unicode(descr).encode('utf-8')
Returns searchable data as Description
Below is the the instruction that describes the task: ### Input: Returns searchable data as Description ### Response: def Description(self): """Returns searchable data as Description""" descr = " ".join((self.getId(), self.aq_parent.Title())) return safe_unicode(descr).encode('utf-8')
def select_neighbors_by_edge_attribute(docgraph, source, attribute=None, value=None, data=False): """Get all neighbors with the given edge attribute value(s).""" assert isinstance(docgraph, MultiGraph) for neighbor_id in docgraph.neighbors_iter(source): edges = docgraph[source][neighbor_id].values() if attribute is None: has_attrib = True # don't filter neighbors else: has_attrib = any(attribute in edge for edge in edges) if has_attrib: if value is None: has_value = True elif isinstance(value, basestring): has_value = any(edge.get(attribute) == value for edge in edges) else: # ``value`` is a list/set/dict of values has_value = any(edge.get(attribute) == v for edge in edges for v in value) if has_value: if data: yield (neighbor_id, docgraph.node[neighbor_id]) else: yield neighbor_id
Get all neighbors with the given edge attribute value(s).
Below is the the instruction that describes the task: ### Input: Get all neighbors with the given edge attribute value(s). ### Response: def select_neighbors_by_edge_attribute(docgraph, source, attribute=None, value=None, data=False): """Get all neighbors with the given edge attribute value(s).""" assert isinstance(docgraph, MultiGraph) for neighbor_id in docgraph.neighbors_iter(source): edges = docgraph[source][neighbor_id].values() if attribute is None: has_attrib = True # don't filter neighbors else: has_attrib = any(attribute in edge for edge in edges) if has_attrib: if value is None: has_value = True elif isinstance(value, basestring): has_value = any(edge.get(attribute) == value for edge in edges) else: # ``value`` is a list/set/dict of values has_value = any(edge.get(attribute) == v for edge in edges for v in value) if has_value: if data: yield (neighbor_id, docgraph.node[neighbor_id]) else: yield neighbor_id
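A toy sketch under the networkx 1.x API this code targets (neighbors_iter and graph.node were removed in networkx 2.x, and the basestring check dates the module to Python 2); node names and the 'relation' attribute are invented:

import networkx as nx
g = nx.MultiGraph()
g.add_edge('w1', 'w2', relation='dominates')
g.add_edge('w1', 'w3', relation='precedes')
hits = list(select_neighbors_by_edge_attribute(g, 'w1',
                                               attribute='relation',
                                               value='dominates'))
# hits == ['w2']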
def start(self): '''Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish() ''' if self.maxval is None: self.maxval = self._DEFAULT_MAXVAL self.num_intervals = max(100, self.term_width) self.next_update = 0 if self.maxval is not UnknownLength: if self.maxval < 0: raise ValueError('Value out of range') self.update_interval = self.maxval / self.num_intervals self.start_time = self.last_update_time = time.time() self.update(0) return self
Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish()
Below is the the instruction that describes the task: ### Input: Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish() ### Response: def start(self): '''Starts measuring time, and prints the bar at 0%. It returns self so you can use it like this: >>> pbar = ProgressBar().start() >>> for i in range(100): ... # do something ... pbar.update(i+1) ... >>> pbar.finish() ''' if self.maxval is None: self.maxval = self._DEFAULT_MAXVAL self.num_intervals = max(100, self.term_width) self.next_update = 0 if self.maxval is not UnknownLength: if self.maxval < 0: raise ValueError('Value out of range') self.update_interval = self.maxval / self.num_intervals self.start_time = self.last_update_time = time.time() self.update(0) return self
def ancestors_root(self): """Returns a list of the ancestors of this node but does not pass the root node, even if the root has parents due to cycles.""" if self.is_root(): return [] ancestors = set([]) self._depth_ascend(self, ancestors, True) try: ancestors.remove(self) except KeyError: # we weren't ancestor of ourself, that's ok pass return list(ancestors)
Returns a list of the ancestors of this node but does not pass the root node, even if the root has parents due to cycles.
Below is the the instruction that describes the task: ### Input: Returns a list of the ancestors of this node but does not pass the root node, even if the root has parents due to cycles. ### Response: def ancestors_root(self): """Returns a list of the ancestors of this node but does not pass the root node, even if the root has parents due to cycles.""" if self.is_root(): return [] ancestors = set([]) self._depth_ascend(self, ancestors, True) try: ancestors.remove(self) except KeyError: # we weren't ancestor of ourself, that's ok pass return list(ancestors)
def _save_image(fig, filename, filetype='png', resolution=300):
    """
    If filename is specified, saves the image
    :param str filename: Name of the file
    :param str filetype: Type of file
    :param int resolution: DPI resolution of the output figure
    """
    if filename:
        filename, filetype, resolution = build_filename(filename, filetype, resolution)
        fig.savefig(filename, dpi=resolution, format=filetype)
If filename is specified, saves the image :param str filename: Name of the file :param str filetype: Type of file :param int resolution: DPI resolution of the output figure
Below is the the instruction that describes the task: ### Input: If filename is specified, saves the image :param str filename: Name of the file :param str filetype: Type of file :param int resolution: DPI resolution of the output figure ### Response: def _save_image(fig, filename, filetype='png', resolution=300): """ If filename is specified, saves the image :param str filename: Name of the file :param str filetype: Type of file :param int resolution: DPI resolution of the output figure """ if filename: filename, filetype, resolution = build_filename(filename, filetype, resolution) fig.savefig(filename, dpi=resolution, format=filetype) else: pass
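For instance — a hedged sketch assuming matplotlib is available; build_filename (defined elsewhere in the module) normalizes the name/type/resolution triple before saving:

import matplotlib.pyplot as plt
fig, ax = plt.subplots()
ax.plot([0, 1], [0, 1])
_save_image(fig, 'calibration_curve', filetype='pdf', resolution=150)
_save_image(fig, None)   # falsy filename: nothing is written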
def get(self, filepath):
    """
    Get directory details for the specified file.
    If contents is set to True (default=False) then the directory
    contents will be sent along with the directory details.
    If dir_sizes is set to True (default=False) then du -hs will
    be run against subdirectories for accurate content sizes.
    """
    # query-string values arrive as strings, so compare against u'true'
    contents = self.get_argument('contents', False) == u'true'
    dir_sizes = self.get_argument('dir_sizes', False) == u'true'
    try:
        res = self.fs.get_directory_details(filepath,contents=contents,dir_sizes=dir_sizes)
        res = res.to_dict()
        self.write(res)
    except OSError:
        raise tornado.web.HTTPError(404)
Get directory details for the specified file. If contents is set to True (default=False) then the directory contents will be sent along with the directory details. If dir_sizes is set to True (default=False) then du -hs will be run against subdirectories for accurate content sizes.
Below is the the instruction that describes the task: ### Input: Get directory details for the specified file. If contents is set to True (default) then the directory contents will be sent along with the directory details. If dir_size is set to True (default=False) then du -hs will be run against subdirectories for accurate content sizes. ### Response: def get(self, filepath): """ Get directory details for the specified file. If contents is set to True (default) then the directory contents will be sent along with the directory details. If dir_size is set to True (default=False) then du -hs will be run against subdirectories for accurate content sizes. """ contents = self.get_argument('contents', False) if contents == u'true': contents = True else: contents = False dir_sizes = self.get_argument('dir_sizes', False) if dir_sizes == u'true': dir_sizes = True else: dir_sizes = False try: res = self.fs.get_directory_details(filepath,contents=contents,dir_sizes=dir_sizes) res = res.to_dict() self.write(res) except OSError: raise tornado.web.HTTPError(404)
def deletesshkey(self, key_id):
    """
    Deletes an sshkey for the current user identified by id

    :param key_id: the id of the key
    :return: False if it didn't delete it, True if it was deleted
    """
    request = requests.delete(
        '{0}/{1}'.format(self.keys_url, key_id),
        headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout)

    # the API returns the literal body 'null' when nothing was deleted
    return request.content != b'null'
Deletes an sshkey for the current user identified by id :param key_id: the id of the key :return: False if it didn't delete it, True if it was deleted
Below is the the instruction that describes the task: ### Input: Deletes an sshkey for the current user identified by id :param key_id: the id of the key :return: False if it didn't delete it, True if it was deleted ### Response: def deletesshkey(self, key_id): """ Deletes an sshkey for the current user identified by id :param key_id: the id of the key :return: False if it didn't delete it, True if it was deleted """ request = requests.delete( '{0}/{1}'.format(self.keys_url, key_id), headers=self.headers, verify=self.verify_ssl, auth=self.auth, timeout=self.timeout) if request.content == b'null': return False else: return True
def sctiks(sc, clkstr): """ Convert a spacecraft clock format string to number of "ticks". http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html :param sc: NAIF spacecraft identification code. :type sc: int :param clkstr: Character representation of a spacecraft clock. :type clkstr: str :return: Number of ticks represented by the clock string. :rtype: float """ sc = ctypes.c_int(sc) clkstr = stypes.stringToCharP(clkstr) ticks = ctypes.c_double() libspice.sctiks_c(sc, clkstr, ctypes.byref(ticks)) return ticks.value
Convert a spacecraft clock format string to number of "ticks". http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html :param sc: NAIF spacecraft identification code. :type sc: int :param clkstr: Character representation of a spacecraft clock. :type clkstr: str :return: Number of ticks represented by the clock string. :rtype: float
Below is the the instruction that describes the task: ### Input: Convert a spacecraft clock format string to number of "ticks". http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html :param sc: NAIF spacecraft identification code. :type sc: int :param clkstr: Character representation of a spacecraft clock. :type clkstr: str :return: Number of ticks represented by the clock string. :rtype: float ### Response: def sctiks(sc, clkstr): """ Convert a spacecraft clock format string to number of "ticks". http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html :param sc: NAIF spacecraft identification code. :type sc: int :param clkstr: Character representation of a spacecraft clock. :type clkstr: str :return: Number of ticks represented by the clock string. :rtype: float """ sc = ctypes.c_int(sc) clkstr = stypes.stringToCharP(clkstr) ticks = ctypes.c_double() libspice.sctiks_c(sc, clkstr, ctypes.byref(ticks)) return ticks.value
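A hedged sketch in the spiceypy style: an SCLK kernel for the spacecraft must already be loaded, and both the kernel filename and the clock string below are placeholders; -82 is the NAIF ID for Cassini:

furnsh('cas00172.tsc')                     # placeholder SCLK kernel filename
ticks = sctiks(-82, '1/1465644281.165')    # placeholder clock string
print(ticks)                               # float tick count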
def unicode2encode(text, charmap):
    '''
    charmap : dictionary mapping encoded characters (keys) to their
    unicode equivalents (values)
    '''
    if isinstance(text, (list, tuple)):
        unitxt = ''
        for line in text:
            for enc, uni in charmap.items():
                if uni in line:
                    line = line.replace(uni, enc)
            # end of inner replacement loop
            unitxt += line
        # end of for line in text:
        return unitxt
    elif isinstance(text, str):
        for enc, uni in charmap.items():
            if uni in text:
                text = text.replace(uni, enc)
        return text
charmap : dictionary mapping encoded characters (keys) to their unicode equivalents (values)
Below is the the instruction that describes the task: ### Input: charmap : dictionary which has both encode as key, unicode as value ### Response: def unicode2encode(text, charmap): ''' charmap : dictionary which has both encode as key, unicode as value ''' if isinstance(text, (list, tuple)): unitxt = '' for line in text: for val,key in charmap.items(): if key in line: line = line.replace(key, val) # end of if val in text: unitxt += line # end of for line in text: return unitxt elif isinstance(text, str): for val,key in charmap.items(): if key in text: text = text.replace(key, val) return text
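A toy sketch; the two-entry charmap is invented for illustration (real maps cover a whole legacy encoding), and note that the list branch concatenates its results without separators:

charmap = {'a': 'α', 'b': 'β'}          # encoded char -> unicode char
unicode2encode('αβ', charmap)           # -> 'ab'
unicode2encode(['αx', 'βy'], charmap)   # -> 'axby'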
def shader_substring(body, stack_frame=1): """ Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end. """ line_count = len(body.splitlines(True)) line_number = inspect.stack()[stack_frame][2] + 1 - line_count return """\ #line %d %s """ % (line_number, textwrap.dedent(body))
Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end.
Below is the the instruction that describes the task: ### Input: Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end. ### Response: def shader_substring(body, stack_frame=1): """ Call this method from a function that defines a literal shader string as the "body" argument. Dresses up a shader string in two ways: 1) Insert #line number declaration 2) un-indents The line number information can help debug glsl compile errors. The unindenting allows you to type the shader code at a pleasing indent level in your python method, while still creating an unindented GLSL string at the end. """ line_count = len(body.splitlines(True)) line_number = inspect.stack()[stack_frame][2] + 1 - line_count return """\ #line %d %s """ % (line_number, textwrap.dedent(body))
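A sketch of the intended call pattern — a wrapper function returning a GLSL snippet (the shader body is illustrative):

def white_fragment_shader():
    return shader_substring("""
        void main() {
            gl_FragColor = vec4(1.0, 1.0, 1.0, 1.0);
        }
        """)

print(white_fragment_shader())   # begins with a '#line <n>' directive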
def wait_while_reachable(self, servers, timeout=60):
    """Wait until all servers are reachable
    Args:
       servers - list of servers
    """
    t_start = time.time()
    while True:
        try:
            for server in servers:
                # TODO: use state code to check if server is reachable
                server_info = self.connection(
                    hostname=server,
                    timeout=5).admin.command('ismaster')
                logger.debug("server_info: {server_info}".format(server_info=server_info))
                if int(server_info['ok']) != 1:
                    raise pymongo.errors.OperationFailure("{server} is not reachable".format(server=server))
            return True
        except (KeyError, AttributeError, pymongo.errors.AutoReconnect,
                pymongo.errors.OperationFailure):
            if time.time() - t_start > timeout:
                return False
            time.sleep(0.1)
Wait until all servers are reachable Args: servers - list of servers
Below is the the instruction that describes the task: ### Input: wait while all servers be reachable Args: servers - list of servers ### Response: def wait_while_reachable(self, servers, timeout=60): """wait while all servers be reachable Args: servers - list of servers """ t_start = time.time() while True: try: for server in servers: # TODO: use state code to check if server is reachable server_info = self.connection( hostname=server, timeout=5).admin.command('ismaster') logger.debug("server_info: {server_info}".format(server_info=server_info)) if int(server_info['ok']) != 1: raise pymongo.errors.OperationFailure("{server} is not reachable".format(**locals)) return True except (KeyError, AttributeError, pymongo.errors.AutoReconnect, pymongo.errors.OperationFailure): if time.time() - t_start > timeout: return False time.sleep(0.1)
def set_bind(self): """ Sets key bindings -- we need this more than once """ IntegerEntry.set_bind(self) self.unbind('<Shift-Up>') self.unbind('<Shift-Down>') self.unbind('<Control-Up>') self.unbind('<Control-Down>') self.unbind('<Double-Button-1>') self.unbind('<Double-Button-3>') self.unbind('<Shift-Button-1>') self.unbind('<Shift-Button-3>') self.unbind('<Control-Button-1>') self.unbind('<Control-Button-3>') self.bind('<Button-1>', lambda e: self.add(1)) self.bind('<Button-3>', lambda e: self.sub(1)) self.bind('<Up>', lambda e: self.add(1)) self.bind('<Down>', lambda e: self.sub(1)) self.bind('<Enter>', self._enter) self.bind('<Next>', lambda e: self.set(self.allowed[0])) self.bind('<Prior>', lambda e: self.set(self.allowed[-1]))
Sets key bindings -- we need this more than once
Below is the the instruction that describes the task: ### Input: Sets key bindings -- we need this more than once ### Response: def set_bind(self): """ Sets key bindings -- we need this more than once """ IntegerEntry.set_bind(self) self.unbind('<Shift-Up>') self.unbind('<Shift-Down>') self.unbind('<Control-Up>') self.unbind('<Control-Down>') self.unbind('<Double-Button-1>') self.unbind('<Double-Button-3>') self.unbind('<Shift-Button-1>') self.unbind('<Shift-Button-3>') self.unbind('<Control-Button-1>') self.unbind('<Control-Button-3>') self.bind('<Button-1>', lambda e: self.add(1)) self.bind('<Button-3>', lambda e: self.sub(1)) self.bind('<Up>', lambda e: self.add(1)) self.bind('<Down>', lambda e: self.sub(1)) self.bind('<Enter>', self._enter) self.bind('<Next>', lambda e: self.set(self.allowed[0])) self.bind('<Prior>', lambda e: self.set(self.allowed[-1]))
def data_to_bytes(self, data_element, encoding=None):
    """
    Converts the given data element into a string representation using
    the :meth:`data_to_string` method and encodes the resulting text
    with the given encoding.
    """
    if encoding is None:
        encoding = self.encoding
    text = self.data_to_string(data_element)
    return bytes_(text, encoding=encoding)
Converts the given data element into a string representation using the :meth:`data_to_string` method and encodes the resulting text with the given encoding.
Below is the the instruction that describes the task: ### Input: Converts the given data element into a string representation using the :method:`data_to_string` method and encodes the resulting text with the given encoding. ### Response: def data_to_bytes(self, data_element, encoding=None): """ Converts the given data element into a string representation using the :method:`data_to_string` method and encodes the resulting text with the given encoding. """ if encoding is None: encoding = self.encoding text = self.data_to_string(data_element) return bytes_(text, encoding=encoding)
def set_tag(self, name, tag_class):
    """
    Define a new tag parser method
    :param name: The name of the tag
    :type  name: str
    :param tag_class: The Tag class, this must be a subclass of base parser.tags.Tag
    :type  tag_class: Tag
    """
    # Has this tag already been defined?
    if name in self._tags:
        self._log.warn('Overwriting an existing Tag class: {tag}'.format(tag=name))

    # Make sure the tag class adheres to the base Tag interface
    if not issubclass(tag_class, Tag):
        self._log.error('Tag class must implement the base Tag interface, please review the documentation on '
                        'defining custom tags. (Refusing to set the tag "{tag}")'.format(tag=name))
        return

    self._tags[name] = tag_class
Define a new tag parser method :param name: The name of the tag :type name: str :param tag_class: The Tag class, this must be a subclass of base parser.tags.Tag :type tag_class: Tag
Below is the the instruction that describes the task: ### Input: Define a new tag parser method :param name: The name of the tag :type name: str :param tag_class: The Tag class, this must be a subclass of base parser.tags.Tag :type tag_class: Tag ### Response: def set_tag(self, name, tag_class): """ Define a new tag parser method :param name: The name of the tag :type name: str :param tag_class: The Tag class, this must be a subclass of base parser.tags.Tag :type tag_class: Tag """ # Has this tag already been defined? if name in self._tags: self._log.warn('Overwriting an existing Tag class: {tag}'.format(tag=name)) # Make sure the tag class adhered to the base Tag interface if not issubclass(tag_class, Tag): self._log.error('Tag class must implement the base Tag interface, please review the documentation on ' 'defining custom tags. (Refusing to set the tag "{tag}")'.format(tag=name)) return self._tags[name] = tag_class
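Registering a custom tag — a hedged sketch where EchoTag is hypothetical and merely gestures at the base Tag interface documented elsewhere:

class EchoTag(Tag):              # hypothetical subclass of parser.tags.Tag
    def parse(self, value):      # assumed hook name, for illustration only
        return value

parser.set_tag('echo', EchoTag)  # parser: instance of the class above
parser.set_tag('echo', EchoTag)  # second call logs an overwrite warning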
def gen_ipa_data(self): """ Generator for iterating over the IPA strings found in the dataset file. Yields the IPA data string paired with the respective line number. """ dialect = self.get_dialect() f = self._open() try: if dialect: for res in self._gen_csv_data(f, dialect): yield res else: for res in self._gen_txt_data(f): yield res finally: f.close()
Generator for iterating over the IPA strings found in the dataset file. Yields the IPA data string paired with the respective line number.
Below is the the instruction that describes the task: ### Input: Generator for iterating over the IPA strings found in the dataset file. Yields the IPA data string paired with the respective line number. ### Response: def gen_ipa_data(self): """ Generator for iterating over the IPA strings found in the dataset file. Yields the IPA data string paired with the respective line number. """ dialect = self.get_dialect() f = self._open() try: if dialect: for res in self._gen_csv_data(f, dialect): yield res else: for res in self._gen_txt_data(f): yield res finally: f.close()
def jsonobjlen(self, name, path=Path.rootPath()): """ Returns the length of the dictionary JSON value under ``path`` at key ``name`` """ return self.execute_command('JSON.OBJLEN', name, str_path(path))
Returns the length of the dictionary JSON value under ``path`` at key ``name``
Below is the the instruction that describes the task: ### Input: Returns the length of the dictionary JSON value under ``path`` at key ``name`` ### Response: def jsonobjlen(self, name, path=Path.rootPath()): """ Returns the length of the dictionary JSON value under ``path`` at key ``name`` """ return self.execute_command('JSON.OBJLEN', name, str_path(path))
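A short sketch assuming rj is a connected rejson-py Client; the key and document are illustrative:

rj.jsonset('user:1', Path.rootPath(), {'name': 'ada', 'langs': ['python']})
rj.jsonobjlen('user:1')    # -> 2 (two top-level keys)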
def allow(
    self,
    foreign,
    weight=None,
    permission="active",
    account=None,
    threshold=None,
    **kwargs
):
    """ Give additional access to an account by some other public
        key or account.

        :param str foreign: The foreign account that will obtain access
        :param int weight: (optional) The weight to use. If not
            defined, the threshold will be used. If the weight is
            smaller than the threshold, additional signatures will
            be required. (defaults to threshold)
        :param str permission: (optional) The actual permission to
            modify (defaults to ``active``)
        :param str account: (optional) the account to allow access
            to (defaults to ``default_account``)
        :param int threshold: The threshold that needs to be reached
            by signatures to be able to interact
    """
    from copy import deepcopy

    if not account:
        if "default_account" in self.config:
            account = self.config["default_account"]
    if not account:
        raise ValueError("You need to provide an account")
    if permission not in ["owner", "active"]:
        raise ValueError("Permission needs to be either 'owner' or 'active'")

    account = Account(account, blockchain_instance=self)

    if not weight:
        weight = account[permission]["weight_threshold"]

    authority = deepcopy(account[permission])
    try:
        pubkey = PublicKey(foreign, prefix=self.prefix)
        authority["key_auths"].append([str(pubkey), weight])
    except Exception:
        try:
            foreign_account = Account(foreign, blockchain_instance=self)
            authority["account_auths"].append([foreign_account["id"], weight])
        except Exception:
            raise ValueError("Unknown foreign account or invalid public key")
    if threshold:
        authority["weight_threshold"] = threshold
        self._test_weights_treshold(authority)

    op = operations.Account_update(
        **{
            "fee": {"amount": 0, "asset_id": "1.3.0"},
            "account": account["id"],
            permission: authority,
            "extensions": {},
            "prefix": self.prefix,
        }
    )
    if permission == "owner":
        return self.finalizeOp(op, account["name"], "owner", **kwargs)
    else:
        return self.finalizeOp(op, account["name"], "active", **kwargs)
Give additional access to an account by some other public key or account. :param str foreign: The foreign account that will obtain access :param int weight: (optional) The weight to use. If not defined, the threshold will be used. If the weight is smaller than the threshold, additional signatures will be required. (defaults to threshold) :param str permission: (optional) The actual permission to modify (defaults to ``active``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) :param int threshold: The threshold that needs to be reached by signatures to be able to interact
Below is the the instruction that describes the task: ### Input: Give additional access to an account by some other public key or account. :param str foreign: The foreign account that will obtain access :param int weight: (optional) The weight to use. If not define, the threshold will be used. If the weight is smaller than the threshold, additional signatures will be required. (defaults to threshold) :param str permission: (optional) The actual permission to modify (defaults to ``active``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) :param int threshold: The threshold that needs to be reached by signatures to be able to interact ### Response: def allow( self, foreign, weight=None, permission="active", account=None, threshold=None, **kwargs ): """ Give additional access to an account by some other public key or account. :param str foreign: The foreign account that will obtain access :param int weight: (optional) The weight to use. If not define, the threshold will be used. If the weight is smaller than the threshold, additional signatures will be required. (defaults to threshold) :param str permission: (optional) The actual permission to modify (defaults to ``active``) :param str account: (optional) the account to allow access to (defaults to ``default_account``) :param int threshold: The threshold that needs to be reached by signatures to be able to interact """ from copy import deepcopy if not account: if "default_account" in self.config: account = self.config["default_account"] if not account: raise ValueError("You need to provide an account") if permission not in ["owner", "active"]: raise ValueError("Permission needs to be either 'owner', or 'active") account = Account(account, blockchain_instance=self) if not weight: weight = account[permission]["weight_threshold"] authority = deepcopy(account[permission]) try: pubkey = PublicKey(foreign, prefix=self.prefix) authority["key_auths"].append([str(pubkey), weight]) except Exception: try: foreign_account = Account(foreign, blockchain_instance=self) authority["account_auths"].append([foreign_account["id"], weight]) except Exception: raise ValueError("Unknown foreign account or invalid public key") if threshold: authority["weight_threshold"] = threshold self._test_weights_treshold(authority) op = operations.Account_update( **{ "fee": {"amount": 0, "asset_id": "1.3.0"}, "account": account["id"], permission: authority, "extensions": {}, "prefix": self.prefix, } ) if permission == "owner": return self.finalizeOp(op, account["name"], "owner", **kwargs) else: return self.finalizeOp(op, account["name"], "active", **kwargs)
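A hedged call sketch for a python-bitshares style instance; the key, account name, and threshold values are placeholders:

bitshares.allow(
    'BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV',  # placeholder public key
    weight=1,
    permission='active',
    account='my-account',     # placeholder account name
    threshold=2,
)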
def infer_assign(self, context=None):
    """infer an AssignName/AssignAttr: need to inspect the RHS part of the
    assign node
    """
    stmt = self.statement()
    if isinstance(stmt, nodes.AugAssign):
        return stmt.infer(context)

    stmts = list(self.assigned_stmts(context=context))
    return bases._infer_stmts(stmts, context)
infer an AssignName/AssignAttr: need to inspect the RHS part of the assign node
Below is the the instruction that describes the task:
### Input:
infer an AssignName/AssignAttr: need to inspect the RHS part of the
assign node
### Response:
def infer_assign(self, context=None):
    """infer an AssignName/AssignAttr: need to inspect the RHS part of the
    assign node
    """
    stmt = self.statement()
    if isinstance(stmt, nodes.AugAssign):
        return stmt.infer(context)

    stmts = list(self.assigned_stmts(context=context))
    return bases._infer_stmts(stmts, context)
def _find_executables(name): """ Try to find an executable. """ exe_name = name + '.exe' * sys.platform.startswith('win') env_path = os.environ.get(name.upper()+ '_PATH', '') possible_locations = [] def add(*dirs): for d in dirs: if d and d not in possible_locations and os.path.isdir(d): possible_locations.append(d) # Get list of possible locations add(env_path) try: add(os.path.dirname(os.path.abspath(__file__))) except NameError: # __file__ may not exist pass add(os.path.dirname(sys.executable)) add(os.path.expanduser('~')) # Platform specific possible locations if sys.platform.startswith('win'): add('c:\\program files', os.environ.get('PROGRAMFILES'), 'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)')) else: possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin']) def do_check_version(exe): try: return subprocess.check_output([exe, '--version']).decode().strip() except Exception: # print('not a good exe', exe) return False # If env path is the exe itself ... if os.path.isfile(env_path): ver = do_check_version(env_path) if ver: return env_path, ver # First try to find obvious locations for d in possible_locations: for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]: if os.path.isfile(exe): ver = do_check_version(exe) if ver: return exe, ver # Maybe the exe is on the PATH ver = do_check_version(exe_name) if ver: return exe_name, ver # Try harder for d in possible_locations: for sub in reversed(sorted(os.listdir(d))): if sub.startswith(name): exe = os.path.join(d, sub, exe_name) if os.path.isfile(exe): ver = do_check_version(exe) if ver: return exe, ver return None, None
Try to find an executable.
Below is the the instruction that describes the task: ### Input: Try to find an executable. ### Response: def _find_executables(name): """ Try to find an executable. """ exe_name = name + '.exe' * sys.platform.startswith('win') env_path = os.environ.get(name.upper()+ '_PATH', '') possible_locations = [] def add(*dirs): for d in dirs: if d and d not in possible_locations and os.path.isdir(d): possible_locations.append(d) # Get list of possible locations add(env_path) try: add(os.path.dirname(os.path.abspath(__file__))) except NameError: # __file__ may not exist pass add(os.path.dirname(sys.executable)) add(os.path.expanduser('~')) # Platform specific possible locations if sys.platform.startswith('win'): add('c:\\program files', os.environ.get('PROGRAMFILES'), 'c:\\program files (x86)', os.environ.get('PROGRAMFILES(x86)')) else: possible_locations.extend(['/usr/bin','/usr/local/bin','/opt/local/bin']) def do_check_version(exe): try: return subprocess.check_output([exe, '--version']).decode().strip() except Exception: # print('not a good exe', exe) return False # If env path is the exe itself ... if os.path.isfile(env_path): ver = do_check_version(env_path) if ver: return env_path, ver # First try to find obvious locations for d in possible_locations: for exe in [os.path.join(d, exe_name), os.path.join(d, name, exe_name)]: if os.path.isfile(exe): ver = do_check_version(exe) if ver: return exe, ver # Maybe the exe is on the PATH ver = do_check_version(exe_name) if ver: return exe_name, ver # Try harder for d in possible_locations: for sub in reversed(sorted(os.listdir(d))): if sub.startswith(name): exe = os.path.join(d, sub, exe_name) if os.path.isfile(exe): ver = do_check_version(exe) if ver: return exe, ver return None, None
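For example — a hedged call where 'inkscape' is an arbitrary program name; the function returns (exe, version) or (None, None):

exe, ver = _find_executables('inkscape')
if exe is None:
    raise RuntimeError('could not locate inkscape')
print('using %s (version %s)' % (exe, ver))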
def read_constraints_from_config(cp, transforms=None, constraint_section='constraint'): """Loads parameter constraints from a configuration file. Parameters ---------- cp : WorkflowConfigParser An open config parser to read from. transforms : list, optional List of transforms to apply to parameters before applying constraints. constraint_section : str, optional The section to get the constraints from. Default is 'constraint'. Returns ------- list List of ``Constraint`` objects. Empty if no constraints were provided. """ cons = [] for subsection in cp.get_subsections(constraint_section): name = cp.get_opt_tag(constraint_section, "name", subsection) constraint_arg = cp.get_opt_tag( constraint_section, "constraint_arg", subsection) # get any other keyword arguments kwargs = {} section = constraint_section + "-" + subsection extra_opts = [key for key in cp.options(section) if key not in ["name", "constraint_arg"]] for key in extra_opts: val = cp.get(section, key) if key == "required_parameters": val = val.split(_VARARGS_DELIM) else: try: val = float(val) except ValueError: pass kwargs[key] = val cons.append(constraints.constraints[name](constraint_arg, transforms=transforms, **kwargs)) return cons
Loads parameter constraints from a configuration file. Parameters ---------- cp : WorkflowConfigParser An open config parser to read from. transforms : list, optional List of transforms to apply to parameters before applying constraints. constraint_section : str, optional The section to get the constraints from. Default is 'constraint'. Returns ------- list List of ``Constraint`` objects. Empty if no constraints were provided.
Below is the the instruction that describes the task: ### Input: Loads parameter constraints from a configuration file. Parameters ---------- cp : WorkflowConfigParser An open config parser to read from. transforms : list, optional List of transforms to apply to parameters before applying constraints. constraint_section : str, optional The section to get the constraints from. Default is 'constraint'. Returns ------- list List of ``Constraint`` objects. Empty if no constraints were provided. ### Response: def read_constraints_from_config(cp, transforms=None, constraint_section='constraint'): """Loads parameter constraints from a configuration file. Parameters ---------- cp : WorkflowConfigParser An open config parser to read from. transforms : list, optional List of transforms to apply to parameters before applying constraints. constraint_section : str, optional The section to get the constraints from. Default is 'constraint'. Returns ------- list List of ``Constraint`` objects. Empty if no constraints were provided. """ cons = [] for subsection in cp.get_subsections(constraint_section): name = cp.get_opt_tag(constraint_section, "name", subsection) constraint_arg = cp.get_opt_tag( constraint_section, "constraint_arg", subsection) # get any other keyword arguments kwargs = {} section = constraint_section + "-" + subsection extra_opts = [key for key in cp.options(section) if key not in ["name", "constraint_arg"]] for key in extra_opts: val = cp.get(section, key) if key == "required_parameters": val = val.split(_VARARGS_DELIM) else: try: val = float(val) except ValueError: pass kwargs[key] = val cons.append(constraints.constraints[name](constraint_arg, transforms=transforms, **kwargs)) return cons
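An illustrative config fragment this function would consume — the section/option names follow the code above, while the constraint name and its argument are placeholders (extra options become keyword arguments, and required_parameters would be split on the module's _VARARGS_DELIM, defined elsewhere):

[constraint-mass_order]
name = custom
constraint_arg = mass1 >= mass2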
def getAppInfo(self, verbose=None):
    """
    App version and other basic information will be provided.

    :param verbose: print more
    :returns: 200: successful operation
    """
    surl=self.___url
    # drop the trailing path component; str.rstrip strips a character set,
    # not a suffix, so rsplit is used instead of the original rstrip call
    surl=surl.rsplit('/',1)[0]
    response=api(url=surl+'/cyndex2/v1', method="GET", verbose=verbose, parse_params=False)
    return response
App version and other basic information will be provided. :param verbose: print more :returns: 200: successful operation
Below is the the instruction that describes the task: ### Input: App version and other basic information will be provided. :param verbose: print more :returns: 200: successful operation ### Response: def getAppInfo(self, verbose=None): """ App version and other basic information will be provided. :param verbose: print more :returns: 200: successful operation """ surl=self.___url sv=surl.split('/')[-1] surl=surl.rstrip(sv+'/') response=api(url=surl+'/cyndex2/v1', method="GET", verbose=verbose, parse_params=False) return response
def use_openssl(libcrypto_path, libssl_path, trust_list_path=None): """ Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll), or using a specific dynamic library on Linux/BSD (.so). This can also be used to configure oscrypto to use LibreSSL dynamic libraries. This method must be called before any oscrypto submodules are imported. :param libcrypto_path: A unicode string of the file path to the OpenSSL/LibreSSL libcrypto dynamic library. :param libssl_path: A unicode string of the file path to the OpenSSL/LibreSSL libssl dynamic library. :param trust_list_path: An optional unicode string of the path to a file containing OpenSSL-compatible CA certificates in PEM format. If this is not provided and the platform is OS X or Windows, the system trust roots will be exported from the OS and used for all TLS connections. :raises: ValueError - when one of the paths is not a unicode string OSError - when the trust_list_path does not exist on the filesystem oscrypto.errors.LibraryNotFoundError - when one of the path does not exist on the filesystem RuntimeError - when this function is called after another part of oscrypto has been imported """ if not isinstance(libcrypto_path, str_cls): raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path)) if not isinstance(libssl_path, str_cls): raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path)) if not os.path.exists(libcrypto_path): raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path) if not os.path.exists(libssl_path): raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path) if trust_list_path is not None: if not isinstance(trust_list_path, str_cls): raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path)) if not os.path.exists(trust_list_path): raise OSError('trust_list_path does not exist at %s' % trust_list_path) with _backend_lock: if _module_values['backend'] is not None: raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL') _module_values['backend'] = 'openssl' _module_values['backend_config'] = { 'libcrypto_path': libcrypto_path, 'libssl_path': libssl_path, 'trust_list_path': trust_list_path, }
Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll), or using a specific dynamic library on Linux/BSD (.so). This can also be used to configure oscrypto to use LibreSSL dynamic libraries. This method must be called before any oscrypto submodules are imported. :param libcrypto_path: A unicode string of the file path to the OpenSSL/LibreSSL libcrypto dynamic library. :param libssl_path: A unicode string of the file path to the OpenSSL/LibreSSL libssl dynamic library. :param trust_list_path: An optional unicode string of the path to a file containing OpenSSL-compatible CA certificates in PEM format. If this is not provided and the platform is OS X or Windows, the system trust roots will be exported from the OS and used for all TLS connections. :raises: ValueError - when one of the paths is not a unicode string OSError - when the trust_list_path does not exist on the filesystem oscrypto.errors.LibraryNotFoundError - when one of the paths does not exist on the filesystem RuntimeError - when this function is called after another part of oscrypto has been imported
Below is the the instruction that describes the task: ### Input: Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll), or using a specific dynamic library on Linux/BSD (.so). This can also be used to configure oscrypto to use LibreSSL dynamic libraries. This method must be called before any oscrypto submodules are imported. :param libcrypto_path: A unicode string of the file path to the OpenSSL/LibreSSL libcrypto dynamic library. :param libssl_path: A unicode string of the file path to the OpenSSL/LibreSSL libssl dynamic library. :param trust_list_path: An optional unicode string of the path to a file containing OpenSSL-compatible CA certificates in PEM format. If this is not provided and the platform is OS X or Windows, the system trust roots will be exported from the OS and used for all TLS connections. :raises: ValueError - when one of the paths is not a unicode string OSError - when the trust_list_path does not exist on the filesystem oscrypto.errors.LibraryNotFoundError - when one of the path does not exist on the filesystem RuntimeError - when this function is called after another part of oscrypto has been imported ### Response: def use_openssl(libcrypto_path, libssl_path, trust_list_path=None): """ Forces using OpenSSL dynamic libraries on OS X (.dylib) or Windows (.dll), or using a specific dynamic library on Linux/BSD (.so). This can also be used to configure oscrypto to use LibreSSL dynamic libraries. This method must be called before any oscrypto submodules are imported. :param libcrypto_path: A unicode string of the file path to the OpenSSL/LibreSSL libcrypto dynamic library. :param libssl_path: A unicode string of the file path to the OpenSSL/LibreSSL libssl dynamic library. :param trust_list_path: An optional unicode string of the path to a file containing OpenSSL-compatible CA certificates in PEM format. If this is not provided and the platform is OS X or Windows, the system trust roots will be exported from the OS and used for all TLS connections. :raises: ValueError - when one of the paths is not a unicode string OSError - when the trust_list_path does not exist on the filesystem oscrypto.errors.LibraryNotFoundError - when one of the path does not exist on the filesystem RuntimeError - when this function is called after another part of oscrypto has been imported """ if not isinstance(libcrypto_path, str_cls): raise ValueError('libcrypto_path must be a unicode string, not %s' % type_name(libcrypto_path)) if not isinstance(libssl_path, str_cls): raise ValueError('libssl_path must be a unicode string, not %s' % type_name(libssl_path)) if not os.path.exists(libcrypto_path): raise LibraryNotFoundError('libcrypto does not exist at %s' % libcrypto_path) if not os.path.exists(libssl_path): raise LibraryNotFoundError('libssl does not exist at %s' % libssl_path) if trust_list_path is not None: if not isinstance(trust_list_path, str_cls): raise ValueError('trust_list_path must be a unicode string, not %s' % type_name(trust_list_path)) if not os.path.exists(trust_list_path): raise OSError('trust_list_path does not exist at %s' % trust_list_path) with _backend_lock: if _module_values['backend'] is not None: raise RuntimeError('Another part of oscrypto has already been imported, unable to force use of OpenSSL') _module_values['backend'] = 'openssl' _module_values['backend_config'] = { 'libcrypto_path': libcrypto_path, 'libssl_path': libssl_path, 'trust_list_path': trust_list_path, }
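A hedged call sketch — the .so paths below are typical Debian/Ubuntu locations and must match the actual system; this must run before any other oscrypto import:

from oscrypto import use_openssl
use_openssl('/usr/lib/x86_64-linux-gnu/libcrypto.so.1.1',   # placeholder path
            '/usr/lib/x86_64-linux-gnu/libssl.so.1.1')      # placeholder path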
def _get_shift_matrix(self): """np.array: The Camera's lens-shift matrix.""" return np.array([[1., 0., self.x_shift, 0.], [0., 1., self.y_shift, 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]], dtype=np.float32)
np.array: The Camera's lens-shift matrix.
Below is the the instruction that describes the task: ### Input: np.array: The Camera's lens-shift matrix. ### Response: def _get_shift_matrix(self): """np.array: The Camera's lens-shift matrix.""" return np.array([[1., 0., self.x_shift, 0.], [0., 1., self.y_shift, 0.], [0., 0., 1., 0.], [0., 0., 0., 1.]], dtype=np.float32)
def filter(self, value, model=None, context=None): """ Filter Performs value filtering and returns filtered result. :param value: input value :param model: parent model being validated :param context: object, filtering context :return: filtered value """ value = str(value) return bleach.clean(text=value, **self.bleach_params)
Filter Performs value filtering and returns filtered result. :param value: input value :param model: parent model being validated :param context: object, filtering context :return: filtered value
Below is the the instruction that describes the task: ### Input: Filter Performs value filtering and returns filtered result. :param value: input value :param model: parent model being validated :param context: object, filtering context :return: filtered value ### Response: def filter(self, value, model=None, context=None): """ Filter Performs value filtering and returns filtered result. :param value: input value :param model: parent model being validated :param context: object, filtering context :return: filtered value """ value = str(value) return bleach.clean(text=value, **self.bleach_params)
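What the bleach pass does, as a standalone sketch — the tags/strip values are illustrative stand-ins for self.bleach_params:

import bleach
bleach.clean('<b>hi</b> <i>there</i>', tags=['b'], strip=True)
# -> '<b>hi</b> there'  (disallowed markup stripped, inner text kept)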