code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def profiler(self):
    """
    Calculate the core profile (sequence type) for each strain and write an
    MLST-style profile file (profile.txt) to self.profilelocation.
    """
    printtime('Calculating core profiles', self.start)
    # Collect the unique (gene, allele) combinations across all strains.
    for strain in self.corealleles:
        self.coreset.add(tuple(sorted(self.corealleles[strain].items())))
    # Header mirrors an MLST profile: ST,gene1,gene2,...
    header = 'ST,{}\n'.format(','.join(sorted(self.geneset)))
    data = ''
    # Sort the cores before enumerating so sequence-type numbering is
    # deterministic. The previous sorted(enumerate(self.coreset)) sorted by
    # the enumeration index, which merely preserved the arbitrary set
    # iteration order. enumerate(..., start=1) replaces the manual count += 1.
    for count, core in enumerate(sorted(self.coreset), start=1):
        # Add the sequence type number to the profile.
        data += '{}'.format(count)
        # Record the sequence type for every strain with this core profile.
        for strain in self.corealleles:
            if tuple(sorted(self.corealleles[strain].items())) == core:
                self.profiles[strain] = count
        # Append the allele number for each gene; core was built via
        # tuple(sorted(...)) above, so it is already in sorted gene order.
        for gene in core:
            data += ',{}'.format(gene[1])
        data += '\n'
    # Write the profile.
    with open(os.path.join(self.profilelocation, 'profile.txt'), 'w') as profile:
        profile.write(header)
        profile.write(data)
    # Create a list of which strains correspond to the sequence types.
    self.linker()
Calculates the core profile for each strain
Below is the the instruction that describes the task: ### Input: Calculates the core profile for each strain ### Response: def profiler(self): """ Calculates the core profile for each strain """ printtime('Calculating core profiles', self.start) # Only create the profile if it doesn't exist already # if not os.path.isfile('{}/profile.txt'.format(self.profilelocation)): for strain in self.corealleles: # Add the gene name and allele number pair for each core gene in each strain self.coreset.add(tuple(sorted(self.corealleles[strain].items()))) # Set the header to be similar to an MLST profile - ST,gene1,gene2,etc header = 'ST,{}\n'.format(','.join(sorted(self.geneset))) data = '' for count, core in sorted(enumerate(self.coreset)): # Increment count now to account for 0-based numbering count += 1 # Add the sequence type number to the profile data += '{}'.format(count) # Store the sequence type for each strain for strain in self.corealleles: if tuple(sorted(self.corealleles[strain].items())) == core: self.profiles[strain] = count # Add the allele number for each gene for gene in sorted(core): data += ',{}'.format(gene[1]) data += '\n' # Write the profile with open(os.path.join(self.profilelocation, 'profile.txt'), 'w') as profile: profile.write(header) profile.write(data) # Create a list of which strains correspond to the sequence types self.linker()
def get_column_labels(command_name):
    '''Return a dictionary of column labels if available.

    Maps each parameter index of the MAV_CMD enum entry for command_name to
    a human-readable label; returns an empty dict when the command name
    cannot be resolved.
    '''
    cmd = cmd_reverse_lookup(command_name)
    if cmd == 0:
        return {}
    enum = mavutil.mavlink.enums['MAV_CMD'][cmd]
    # Iterate items() rather than keys() to avoid a second lookup per key.
    return {col: make_column_label(command_name, param, "P%u" % col)
            for col, param in enum.param.items()}
return dictionary of column labels if available
Below is the the instruction that describes the task: ### Input: return dictionary of column labels if available ### Response: def get_column_labels(command_name): '''return dictionary of column labels if available''' cmd = cmd_reverse_lookup(command_name) if cmd == 0: return {} labels = {} enum = mavutil.mavlink.enums['MAV_CMD'][cmd] for col in enum.param.keys(): labels[col] = make_column_label(command_name, enum.param[col], "P%u" % col) return labels
def verify_url(url, secret_key, **kwargs):
    """
    Verify a signed URL (excluding the domain and scheme).

    :param url: URL to sign
    :param secret_key: Secret key
    :rtype: bool
    :raises: URLError
    """
    # Split the URL into its components, then delegate verification of the
    # path plus query arguments to verify_url_path.
    parsed = urlparse(url)
    query_args = MultiValueDict(parse_qs(parsed.query))
    return verify_url_path(parsed.path, query_args, secret_key, **kwargs)
Verify a signed URL (excluding the domain and scheme). :param url: URL to sign :param secret_key: Secret key :rtype: bool :raises: URLError
Below is the the instruction that describes the task: ### Input: Verify a signed URL (excluding the domain and scheme). :param url: URL to sign :param secret_key: Secret key :rtype: bool :raises: URLError ### Response: def verify_url(url, secret_key, **kwargs): """ Verify a signed URL (excluding the domain and scheme). :param url: URL to sign :param secret_key: Secret key :rtype: bool :raises: URLError """ result = urlparse(url) query_args = MultiValueDict(parse_qs(result.query)) return verify_url_path(result.path, query_args, secret_key, **kwargs)
def getEngineChangelist(self):
    """
    Returns the compatible Perforce changelist identifier for the latest
    installed version of UE4
    """
    details = self._getEngineVersionDetails()
    # Newer engine versions expose "CompatibleChangelist"; older ones only
    # provide "Changelist", so fall back when the newer key is absent.
    key = 'CompatibleChangelist' if 'CompatibleChangelist' in details else 'Changelist'
    return int(details[key])
Returns the compatible Perforce changelist identifier for the latest installed version of UE4
Below is the the instruction that describes the task: ### Input: Returns the compatible Perforce changelist identifier for the latest installed version of UE4 ### Response: def getEngineChangelist(self): """ Returns the compatible Perforce changelist identifier for the latest installed version of UE4 """ # Newer versions of the engine use the key "CompatibleChangelist", older ones use "Changelist" version = self._getEngineVersionDetails() if 'CompatibleChangelist' in version: return int(version['CompatibleChangelist']) else: return int(version['Changelist'])
def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None):
    """
    Make an n-fold list of CVPack from random indices.

    Parameters:
        dall: the full dataset; must expose num_row() and slice().
        nfold: number of cross-validation folds.
        param: booster parameter dict (copied before preprocessing).
        seed: RNG seed for the index permutation.
        evals: evaluation metric names appended as ('eval_metric', name) pairs.
        fpreproc: optional callable (dtrain, dtest, param) ->
            (dtrain, dtest, param) applied to each fold.

    Returns:
        list of CVPack, one per fold.
    """
    evals = list(evals)
    np.random.seed(seed)
    randidx = np.random.permutation(dall.num_row())
    # Floor division: plain '/' yields a float under Python 3, and floats
    # are not valid slice bounds (TypeError).
    kstep = len(randidx) // nfold
    idset = [randidx[(i * kstep): min(len(randidx), (i + 1) * kstep)]
             for i in range(nfold)]
    ret = []
    for k in range(nfold):
        # Train on every fold except k; evaluate on fold k.
        dtrain = dall.slice(np.concatenate(
            [idset[i] for i in range(nfold) if k != i]))
        dtest = dall.slice(idset[k])
        # Run preprocessing on the data set if needed.
        if fpreproc is not None:
            dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy())
        else:
            tparam = param
        plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals]
        ret.append(CVPack(dtrain, dtest, plst))
    return ret
Make an n-fold list of CVPack from random indices.
Below is the the instruction that describes the task: ### Input: Make an n-fold list of CVPack from random indices. ### Response: def mknfold(dall, nfold, param, seed, evals=(), fpreproc=None): """ Make an n-fold list of CVPack from random indices. """ evals = list(evals) np.random.seed(seed) randidx = np.random.permutation(dall.num_row()) kstep = len(randidx) / nfold idset = [randidx[(i * kstep): min(len(randidx), (i + 1) * kstep)] for i in range(nfold)] ret = [] for k in range(nfold): dtrain = dall.slice(np.concatenate([idset[i] for i in range(nfold) if k != i])) dtest = dall.slice(idset[k]) # run preprocessing on the data set if needed if fpreproc is not None: dtrain, dtest, tparam = fpreproc(dtrain, dtest, param.copy()) else: tparam = param plst = list(tparam.items()) + [('eval_metric', itm) for itm in evals] ret.append(CVPack(dtrain, dtest, plst)) return ret
def _pypsa_storage_timeseries(network, timesteps, mode=None):
    """
    Timeseries in PyPSA compatible format for storage instances

    Parameters
    ----------
    network : Network
        The eDisGo grid topology model overall container
    timesteps : array_like
        Timesteps is an array-like object with entries of type
        :pandas:`pandas.Timestamp<timestamp>` specifying which time steps
        to export to pypsa representation and use in power flow analysis.
    mode : str, optional
        Specifically retrieve generator time series for MV or LV grid level
        or both. Either choose 'mv' or 'lv'.
        Defaults to None, which returns both timeseries for MV and LV in a
        single DataFrame.

    Returns
    -------
    :pandas:`pandas.DataFrame<dataframe>`
        Time series table in PyPSA format
    """
    mv_storage_timeseries_p_min = []
    mv_storage_timeseries_p_max = []

    # MV storage time series. Use '==' rather than 'is': identity comparison
    # against a string literal is implementation-dependent and raises a
    # SyntaxWarning on CPython >= 3.8.
    if mode == 'mv' or mode is None:
        for storage in network.mv_grid.graph.nodes_by_attribute('storage'):
            # NOTE(review): p_min and p_max are both derived from the same
            # series (storage.timeseries.p) — confirm distinct min/max
            # series are not intended here.
            series = storage.timeseries.p.rename(
                repr(storage)).to_frame().loc[timesteps]
            mv_storage_timeseries_p_min.append(series)
            mv_storage_timeseries_p_max.append(series)

    storage_df_p_min = pd.concat(mv_storage_timeseries_p_min, axis=1)
    storage_df_p_max = pd.concat(mv_storage_timeseries_p_max, axis=1)

    return storage_df_p_min, storage_df_p_max
Timeseries in PyPSA compatible format for storage instances Parameters ---------- network : Network The eDisGo grid topology model overall container timesteps : array_like Timesteps is an array-like object with entries of type :pandas:`pandas.Timestamp<timestamp>` specifying which time steps to export to pypsa representation and use in power flow analysis. mode : str, optional Specifically retrieve generator time series for MV or LV grid level or both. Either choose 'mv' or 'lv'. Defaults to None, which returns both timeseries for MV and LV in a single DataFrame. Returns ------- :pandas:`pandas.DataFrame<dataframe>` Time series table in PyPSA format
Below is the the instruction that describes the task: ### Input: Timeseries in PyPSA compatible format for storage instances Parameters ---------- network : Network The eDisGo grid topology model overall container timesteps : array_like Timesteps is an array-like object with entries of type :pandas:`pandas.Timestamp<timestamp>` specifying which time steps to export to pypsa representation and use in power flow analysis. mode : str, optional Specifically retrieve generator time series for MV or LV grid level or both. Either choose 'mv' or 'lv'. Defaults to None, which returns both timeseries for MV and LV in a single DataFrame. Returns ------- :pandas:`pandas.DataFrame<dataframe>` Time series table in PyPSA format ### Response: def _pypsa_storage_timeseries(network, timesteps, mode=None): """ Timeseries in PyPSA compatible format for storage instances Parameters ---------- network : Network The eDisGo grid topology model overall container timesteps : array_like Timesteps is an array-like object with entries of type :pandas:`pandas.Timestamp<timestamp>` specifying which time steps to export to pypsa representation and use in power flow analysis. mode : str, optional Specifically retrieve generator time series for MV or LV grid level or both. Either choose 'mv' or 'lv'. Defaults to None, which returns both timeseries for MV and LV in a single DataFrame. 
Returns ------- :pandas:`pandas.DataFrame<dataframe>` Time series table in PyPSA format """ mv_storage_timeseries_p_min = [] mv_storage_timeseries_p_max = [] # MV storage time series if mode is 'mv' or mode is None: for storage in network.mv_grid.graph.nodes_by_attribute('storage'): mv_storage_timeseries_p_min.append( storage.timeseries.p.rename(repr( storage)).to_frame().loc[timesteps]) mv_storage_timeseries_p_max.append( storage.timeseries.p.rename(repr( storage)).to_frame().loc[timesteps]) storage_df_p_min = pd.concat( mv_storage_timeseries_p_min, axis=1) storage_df_p_max = pd.concat( mv_storage_timeseries_p_max, axis=1) return storage_df_p_min, storage_df_p_max
def extract(self, text: str) -> List[Extraction]:
    """
    Splits text by sentences.

    Args:
        text (str): Input text to be extracted.

    Returns:
        List[Extraction]: the list of extraction or the empty list if there
        are no matches.
    """
    doc = self._parser(text)
    results = []
    for sentence in doc.sents:
        # NOTE(review): start_char/end_char receive the first/last
        # *characters* of the sentence text, not character offsets (spaCy
        # spans expose sent.start_char / sent.end_char) — confirm this is
        # what Extraction expects.
        results.append(
            Extraction(value=sentence.text,
                       extractor_name=self.name,
                       start_token=sentence[0],
                       end_token=sentence[-1],
                       start_char=sentence.text[0],
                       end_char=sentence.text[-1]))
    return results
Splits text by sentences. Args: text (str): Input text to be extracted. Returns: List[Extraction]: the list of extraction or the empty list if there are no matches.
Below is the the instruction that describes the task: ### Input: Splits text by sentences. Args: text (str): Input text to be extracted. Returns: List[Extraction]: the list of extraction or the empty list if there are no matches. ### Response: def extract(self, text: str) -> List[Extraction]: """ Splits text by sentences. Args: text (str): Input text to be extracted. Returns: List[Extraction]: the list of extraction or the empty list if there are no matches. """ doc = self._parser(text) extractions = list() for sent in doc.sents: this_extraction = Extraction(value=sent.text, extractor_name=self.name, start_token=sent[0], end_token=sent[-1], start_char=sent.text[0], end_char=sent.text[-1]) extractions.append(this_extraction) return extractions
def evaluateR2derivs(Pot, R, z, phi=None, t=0.):
    """
    NAME:
       evaluateR2derivs
    PURPOSE:
       convenience function to evaluate a possible sum of potentials
    INPUT:
       Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
       R - cylindrical Galactocentric distance (can be Quantity)
       z - distance above the plane (can be Quantity)
       phi - azimuth (optional; can be Quantity)
       t - time (optional; can be Quantity)
    OUTPUT:
       d2Phi/d2R(R,z,phi,t)
    HISTORY:
       2012-07-25 - Written - Bovy (IAS)
    """
    nonAxi = _isNonAxi(Pot)
    if nonAxi and phi is None:
        raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
    if isinstance(Pot, list):
        # Accumulate contributions, skipping dissipative forces. Renamed
        # from 'sum' to avoid shadowing the builtin.
        total = 0.
        for pot in Pot:
            if not isinstance(pot, DissipativeForce):
                total += pot.R2deriv(R, z, phi=phi, t=t, use_physical=False)
        return total
    elif isinstance(Pot, Potential):
        return Pot.R2deriv(R, z, phi=phi, t=t, use_physical=False)
    else:  # pragma: no cover
        raise PotentialError("Input to 'evaluateR2derivs' is neither a Potential-instance or a list of such instances")
NAME: evaluateR2derivs PURPOSE: convenience function to evaluate a possible sum of potentials INPUT: Pot - a potential or list of potentials (dissipative forces in such a list are ignored) R - cylindrical Galactocentric distance (can be Quantity) z - distance above the plane (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) OUTPUT: d2Phi/d2R(R,z,phi,t) HISTORY: 2012-07-25 - Written - Bovy (IAS)
Below is the the instruction that describes the task: ### Input: NAME: evaluateR2derivs PURPOSE: convenience function to evaluate a possible sum of potentials INPUT: Pot - a potential or list of potentials (dissipative forces in such a list are ignored) R - cylindrical Galactocentric distance (can be Quantity) z - distance above the plane (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) OUTPUT: d2Phi/d2R(R,z,phi,t) HISTORY: 2012-07-25 - Written - Bovy (IAS) ### Response: def evaluateR2derivs(Pot,R,z,phi=None,t=0.): """ NAME: evaluateR2derivs PURPOSE: convenience function to evaluate a possible sum of potentials INPUT: Pot - a potential or list of potentials (dissipative forces in such a list are ignored) R - cylindrical Galactocentric distance (can be Quantity) z - distance above the plane (can be Quantity) phi - azimuth (optional; can be Quantity) t - time (optional; can be Quantity) OUTPUT: d2Phi/d2R(R,z,phi,t) HISTORY: 2012-07-25 - Written - Bovy (IAS) """ isList= isinstance(Pot,list) nonAxi= _isNonAxi(Pot) if nonAxi and phi is None: raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi") if isList: sum= 0. for pot in Pot: if not isinstance(pot,DissipativeForce): sum+= pot.R2deriv(R,z,phi=phi,t=t,use_physical=False) return sum elif isinstance(Pot,Potential): return Pot.R2deriv(R,z,phi=phi,t=t,use_physical=False) else: #pragma: no cover raise PotentialError("Input to 'evaluateR2derivs' is neither a Potential-instance or a list of such instances")
def references(self, env, object_name, model, assoc_class, result_class_name, role, result_role, keys_only):
    """Instrument Associations.

    All four association-related operations (Associators, AssociatorNames,
    References, ReferenceNames) are mapped to this method.
    This method is a python generator.

    Keyword arguments:
    env -- Provider Environment (pycimmb.ProviderEnvironment)
    object_name -- A pywbem.CIMInstanceName that defines the source CIM
        Object whose associated Objects are to be returned.
    model -- A template pywbem.CIMInstance to serve as a model of the
        objects to be returned.  Only properties present on this model
        need to be set.
    assoc_class -- The pywbem.CIMClass.
    result_class_name -- If not empty, this string acts as a filter on the
        returned set of Instances by mandating that each returned Instance
        MUST represent an association between object_name and an Instance
        of a Class whose name matches this parameter or a subclass.
    role -- If not empty, MUST be a valid Property name.  It acts as a
        filter on the returned set of Instances by mandating that each
        returned Instance MUST refer to object_name via a Property whose
        name matches the value of this parameter.
    result_role -- If not empty, MUST be a valid Property name.  It acts
        as a filter on the returned set of Instances by mandating that
        each returned Instance MUST represent associations of object_name
        to other Instances, where the other Instances play the specified
        result_role in the association (i.e. the name of the Property in
        the Association Class that refers to the Object related to
        object_name MUST match the value of this parameter).
    keys_only -- A boolean.  True if only the key properties should be set
        on the generated instances.

    Possible Errors:
    CIM_ERR_ACCESS_DENIED
    CIM_ERR_NOT_SUPPORTED
    CIM_ERR_INVALID_NAMESPACE
    CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized
        or otherwise incorrect parameters)
    CIM_ERR_FAILED (some other unspecified error occurred)
    """
    # Abstract hook: concrete providers override this generator; the base
    # implementation intentionally yields nothing.
    pass
Instrument Associations. All four association-related operations (Associators, AssociatorNames, References, ReferenceNames) are mapped to this method. This method is a python generator Keyword arguments: env -- Provider Environment (pycimmb.ProviderEnvironment) object_name -- A pywbem.CIMInstanceName that defines the source CIM Object whose associated Objects are to be returned. model -- A template pywbem.CIMInstance to serve as a model of the objects to be returned. Only properties present on this model need to be set. assoc_class -- The pywbem.CIMClass. result_class_name -- If not empty, this string acts as a filter on the returned set of Instances by mandating that each returned Instances MUST represent an association between object_name and an Instance of a Class whose name matches this parameter or a subclass. role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST refer to object_name via a Property whose name matches the value of this parameter. result_role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST represent associations of object_name to other Instances, where the other Instances play the specified result_role in the association (i.e. the name of the Property in the Association Class that refers to the Object related to object_name MUST match the value of this parameter). keys_only -- A boolean. True if only the key properties should be set on the generated instances. The following diagram may be helpful in understanding the role, result_role, and result_class_name parameters. 
+------------------------+ +-------------------+ | object_name.classname | | result_class_name | | ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ | +------------------------+ +-------------------+ | +-----------------------------------+ | | | [Association] assoc_class | | | object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | | +--------------+ object_name.classname REF role | | (CIMInstanceName) | result_class_name REF result_role +------+ | |(CIMInstanceName) +-----------------------------------+ Possible Errors: CIM_ERR_ACCESS_DENIED CIM_ERR_NOT_SUPPORTED CIM_ERR_INVALID_NAMESPACE CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized or otherwise incorrect parameters) CIM_ERR_FAILED (some other unspecified error occurred)
Below is the the instruction that describes the task: ### Input: Instrument Associations. All four association-related operations (Associators, AssociatorNames, References, ReferenceNames) are mapped to this method. This method is a python generator Keyword arguments: env -- Provider Environment (pycimmb.ProviderEnvironment) object_name -- A pywbem.CIMInstanceName that defines the source CIM Object whose associated Objects are to be returned. model -- A template pywbem.CIMInstance to serve as a model of the objects to be returned. Only properties present on this model need to be set. assoc_class -- The pywbem.CIMClass. result_class_name -- If not empty, this string acts as a filter on the returned set of Instances by mandating that each returned Instances MUST represent an association between object_name and an Instance of a Class whose name matches this parameter or a subclass. role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST refer to object_name via a Property whose name matches the value of this parameter. result_role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST represent associations of object_name to other Instances, where the other Instances play the specified result_role in the association (i.e. the name of the Property in the Association Class that refers to the Object related to object_name MUST match the value of this parameter). keys_only -- A boolean. True if only the key properties should be set on the generated instances. The following diagram may be helpful in understanding the role, result_role, and result_class_name parameters. 
+------------------------+ +-------------------+ | object_name.classname | | result_class_name | | ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ | +------------------------+ +-------------------+ | +-----------------------------------+ | | | [Association] assoc_class | | | object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | | +--------------+ object_name.classname REF role | | (CIMInstanceName) | result_class_name REF result_role +------+ | |(CIMInstanceName) +-----------------------------------+ Possible Errors: CIM_ERR_ACCESS_DENIED CIM_ERR_NOT_SUPPORTED CIM_ERR_INVALID_NAMESPACE CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized or otherwise incorrect parameters) CIM_ERR_FAILED (some other unspecified error occurred) ### Response: def references(self, env, object_name, model, assoc_class, result_class_name, role, result_role, keys_only): """Instrument Associations. All four association-related operations (Associators, AssociatorNames, References, ReferenceNames) are mapped to this method. This method is a python generator Keyword arguments: env -- Provider Environment (pycimmb.ProviderEnvironment) object_name -- A pywbem.CIMInstanceName that defines the source CIM Object whose associated Objects are to be returned. model -- A template pywbem.CIMInstance to serve as a model of the objects to be returned. Only properties present on this model need to be set. assoc_class -- The pywbem.CIMClass. result_class_name -- If not empty, this string acts as a filter on the returned set of Instances by mandating that each returned Instances MUST represent an association between object_name and an Instance of a Class whose name matches this parameter or a subclass. role -- If not empty, MUST be a valid Property name. It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST refer to object_name via a Property whose name matches the value of this parameter. result_role -- If not empty, MUST be a valid Property name. 
It acts as a filter on the returned set of Instances by mandating that each returned Instance MUST represent associations of object_name to other Instances, where the other Instances play the specified result_role in the association (i.e. the name of the Property in the Association Class that refers to the Object related to object_name MUST match the value of this parameter). keys_only -- A boolean. True if only the key properties should be set on the generated instances. The following diagram may be helpful in understanding the role, result_role, and result_class_name parameters. +------------------------+ +-------------------+ | object_name.classname | | result_class_name | | ~~~~~~~~~~~~~~~~~~~~~ | | ~~~~~~~~~~~~~~~~~ | +------------------------+ +-------------------+ | +-----------------------------------+ | | | [Association] assoc_class | | | object_name | ~~~~~~~~~~~~~~~~~~~~~~~~~ | | +--------------+ object_name.classname REF role | | (CIMInstanceName) | result_class_name REF result_role +------+ | |(CIMInstanceName) +-----------------------------------+ Possible Errors: CIM_ERR_ACCESS_DENIED CIM_ERR_NOT_SUPPORTED CIM_ERR_INVALID_NAMESPACE CIM_ERR_INVALID_PARAMETER (including missing, duplicate, unrecognized or otherwise incorrect parameters) CIM_ERR_FAILED (some other unspecified error occurred) """ pass
def editor_multi_agent_example():
    """This editor example shows how to interact with holodeck worlds that have
    multiple agents. This is specifically for when working with UE4 directly and
    not a prebuilt binary.
    """
    # Two UAV agents with different sensor suites sharing one world.
    agent_definitions = [
        AgentDefinition("uav0", agents.UavAgent,
                        [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]),
        AgentDefinition("uav1", agents.UavAgent,
                        [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]),
    ]
    env = HolodeckEnvironment(agent_definitions, start_world=False)

    uav0_command = np.array([0, 0, -2, 10])
    uav1_command = np.array([0, 0, 5, 10])

    for _ in range(10):
        env.reset()
        # Queue one command per agent, then step the world.
        env.act("uav0", uav0_command)
        env.act("uav1", uav1_command)
        for _ in range(1000):
            states = env.tick()
            # Per-agent sensor readings from the shared state dict; kept as
            # locals purely for demonstration.
            uav0_terminal = states["uav0"][Sensors.TERMINAL]
            uav1_reward = states["uav1"][Sensors.REWARD]
This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary.
Below is the the instruction that describes the task: ### Input: This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary. ### Response: def editor_multi_agent_example(): """This editor example shows how to interact with holodeck worlds that have multiple agents. This is specifically for when working with UE4 directly and not a prebuilt binary. """ agent_definitions = [ AgentDefinition("uav0", agents.UavAgent, [Sensors.PIXEL_CAMERA, Sensors.LOCATION_SENSOR]), AgentDefinition("uav1", agents.UavAgent, [Sensors.LOCATION_SENSOR, Sensors.VELOCITY_SENSOR]) ] env = HolodeckEnvironment(agent_definitions, start_world=False) cmd0 = np.array([0, 0, -2, 10]) cmd1 = np.array([0, 0, 5, 10]) for i in range(10): env.reset() env.act("uav0", cmd0) env.act("uav1", cmd1) for _ in range(1000): states = env.tick() uav0_terminal = states["uav0"][Sensors.TERMINAL] uav1_reward = states["uav1"][Sensors.REWARD]
def derive_signature(key, qs):
    """Derives the signature from the supplied query string using the key."""
    # Falsy inputs (None, "") are normalized to the empty string before
    # hashing, mirroring the HMAC of empty key/message.
    safe_key = key or ""
    safe_qs = qs or ""
    digest = hmac.new(safe_key.encode(), safe_qs.encode(), hashlib.sha1)
    return digest.hexdigest()
Derives the signature from the supplied query string using the key.
Below is the the instruction that describes the task: ### Input: Derives the signature from the supplied query string using the key. ### Response: def derive_signature(key, qs): """Derives the signature from the supplied query string using the key.""" key, qs = (key or "", qs or "") return hmac.new(key.encode(), qs.encode(), hashlib.sha1).hexdigest()
def all_departed_units(self):
    """
    Collection of all units that were previously part of any relation on
    this endpoint but which have since departed.

    This collection is persistent and mutable.  The departed units will
    be kept until they are explicitly removed, to allow for reasonable
    cleanup of units that have left.

    Example: You need to run a command each time a unit departs the
    relation.

    .. code-block:: python

        @when('endpoint.{endpoint_name}.departed')
        def handle_departed_unit(self):
            for name, unit in self.all_departed_units.items():
                # run the command to remove `unit` from the cluster
                #  ..
            self.all_departed_units.clear()
            clear_flag(self.expand_name('departed'))

    Once a unit is departed, it will no longer show up in
    :attr:`all_joined_units`.  Note that units are considered departed as
    soon as the departed hook is entered, which differs slightly from how
    the Juju primitives behave (departing units are still returned from
    ``related-units`` until after the departed hook is complete).

    This collection is a :class:`KeyList`, so can be used as a mapping to
    look up units by their unit name, or iterated or accessed by index.
    """
    # Lazily load the persisted collection on first access and cache it on
    # the instance for subsequent lookups.
    if self._all_departed_units is None:
        cache_key = 'reactive.endpoints.departed.{}'.format(self.endpoint_name)
        self._all_departed_units = CachedKeyList.load(
            cache_key, RelatedUnit._deserialize, 'unit_name')
    return self._all_departed_units
Collection of all units that were previously part of any relation on this endpoint but which have since departed. This collection is persistent and mutable. The departed units will be kept until they are explicitly removed, to allow for reasonable cleanup of units that have left. Example: You need to run a command each time a unit departs the relation. .. code-block:: python @when('endpoint.{endpoint_name}.departed') def handle_departed_unit(self): for name, unit in self.all_departed_units.items(): # run the command to remove `unit` from the cluster # .. self.all_departed_units.clear() clear_flag(self.expand_name('departed')) Once a unit is departed, it will no longer show up in :attr:`all_joined_units`. Note that units are considered departed as soon as the departed hook is entered, which differs slightly from how the Juju primitives behave (departing units are still returned from ``related-units`` until after the departed hook is complete). This collection is a :class:`KeyList`, so can be used as a mapping to look up units by their unit name, or iterated or accessed by index.
Below is the the instruction that describes the task: ### Input: Collection of all units that were previously part of any relation on this endpoint but which have since departed. This collection is persistent and mutable. The departed units will be kept until they are explicitly removed, to allow for reasonable cleanup of units that have left. Example: You need to run a command each time a unit departs the relation. .. code-block:: python @when('endpoint.{endpoint_name}.departed') def handle_departed_unit(self): for name, unit in self.all_departed_units.items(): # run the command to remove `unit` from the cluster # .. self.all_departed_units.clear() clear_flag(self.expand_name('departed')) Once a unit is departed, it will no longer show up in :attr:`all_joined_units`. Note that units are considered departed as soon as the departed hook is entered, which differs slightly from how the Juju primitives behave (departing units are still returned from ``related-units`` until after the departed hook is complete). This collection is a :class:`KeyList`, so can be used as a mapping to look up units by their unit name, or iterated or accessed by index. ### Response: def all_departed_units(self): """ Collection of all units that were previously part of any relation on this endpoint but which have since departed. This collection is persistent and mutable. The departed units will be kept until they are explicitly removed, to allow for reasonable cleanup of units that have left. Example: You need to run a command each time a unit departs the relation. .. code-block:: python @when('endpoint.{endpoint_name}.departed') def handle_departed_unit(self): for name, unit in self.all_departed_units.items(): # run the command to remove `unit` from the cluster # .. self.all_departed_units.clear() clear_flag(self.expand_name('departed')) Once a unit is departed, it will no longer show up in :attr:`all_joined_units`. 
Note that units are considered departed as soon as the departed hook is entered, which differs slightly from how the Juju primitives behave (departing units are still returned from ``related-units`` until after the departed hook is complete). This collection is a :class:`KeyList`, so can be used as a mapping to look up units by their unit name, or iterated or accessed by index. """ if self._all_departed_units is None: self._all_departed_units = CachedKeyList.load( 'reactive.endpoints.departed.{}'.format(self.endpoint_name), RelatedUnit._deserialize, 'unit_name') return self._all_departed_units
def zadd(self, key, score, member, mode, client=None): """ Like ZADD, but supports different score update modes, in case the member already exists in the ZSET: - "nx": Don't update the score - "xx": Only update elements that already exist. Never add elements. - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score """ if mode == 'nx': f = self._zadd_noupdate elif mode == 'xx': f = self._zadd_update_existing elif mode == 'min': f = self._zadd_update_min elif mode == 'max': f = self._zadd_update_max else: raise NotImplementedError('mode "%s" unsupported' % mode) return f(keys=[key], args=[score, member], client=client)
Like ZADD, but supports different score update modes, in case the member already exists in the ZSET: - "nx": Don't update the score - "xx": Only update elements that already exist. Never add elements. - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score
Below is the the instruction that describes the task: ### Input: Like ZADD, but supports different score update modes, in case the member already exists in the ZSET: - "nx": Don't update the score - "xx": Only update elements that already exist. Never add elements. - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score ### Response: def zadd(self, key, score, member, mode, client=None): """ Like ZADD, but supports different score update modes, in case the member already exists in the ZSET: - "nx": Don't update the score - "xx": Only update elements that already exist. Never add elements. - "min": Use the smaller of the given and existing score - "max": Use the larger of the given and existing score """ if mode == 'nx': f = self._zadd_noupdate elif mode == 'xx': f = self._zadd_update_existing elif mode == 'min': f = self._zadd_update_min elif mode == 'max': f = self._zadd_update_max else: raise NotImplementedError('mode "%s" unsupported' % mode) return f(keys=[key], args=[score, member], client=client)
def load_operations_from_docstring(docstring): """Return a dictionary of OpenAPI operations parsed from a a docstring. """ doc_data = load_yaml_from_docstring(docstring) return { key: val for key, val in iteritems(doc_data) if key in PATH_KEYS or key.startswith("x-") }
Return a dictionary of OpenAPI operations parsed from a a docstring.
Below is the the instruction that describes the task: ### Input: Return a dictionary of OpenAPI operations parsed from a a docstring. ### Response: def load_operations_from_docstring(docstring): """Return a dictionary of OpenAPI operations parsed from a a docstring. """ doc_data = load_yaml_from_docstring(docstring) return { key: val for key, val in iteritems(doc_data) if key in PATH_KEYS or key.startswith("x-") }
def write_transcriptions(utterances: List[Utterance], tgt_dir: Path, ext: str, lazy: bool) -> None: """ Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones". """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: out_path = tgt_dir / "{}.{}".format(utter.prefix, ext) if lazy and out_path.is_file(): continue with out_path.open("w") as f: print(utter.text, file=f)
Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones".
Below is the the instruction that describes the task: ### Input: Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones". ### Response: def write_transcriptions(utterances: List[Utterance], tgt_dir: Path, ext: str, lazy: bool) -> None: """ Write the utterance transcriptions to files in the tgt_dir. Is lazy and checks if the file already exists. Args: utterances: A list of Utterance objects to be written. tgt_dir: The directory in which to write the text of the utterances, one file per utterance. ext: The file extension for the utterances. Typically something like "phonemes", or "phonemes_and_tones". """ tgt_dir.mkdir(parents=True, exist_ok=True) for utter in utterances: out_path = tgt_dir / "{}.{}".format(utter.prefix, ext) if lazy and out_path.is_file(): continue with out_path.open("w") as f: print(utter.text, file=f)
def _detach_process(): """ Detach daemon process. Forks the current process into a parent and a detached child. The child process resides in its own process group, has no controlling terminal attached and is cleaned up by the init process. Returns ``True`` for the parent and ``False`` for the child. """ # To detach from our process group we need to call ``setsid``. We # can only do that if we aren't a process group leader. Therefore # we fork once, which makes sure that the new child process is not # a process group leader. pid = os.fork() if pid > 0: # Parent process # Use waitpid to "collect" the child process and avoid Zombies os.waitpid(pid, 0) return True os.setsid() # We now fork a second time and let the second's fork parent exit. # This makes the second fork's child process an orphan. Orphans are # cleaned up by the init process, so we won't end up with a zombie. # In addition, the second fork's child is no longer a session # leader and can therefore never acquire a controlling terminal. pid = os.fork() if pid > 0: os._exit(os.EX_OK) return False
Detach daemon process. Forks the current process into a parent and a detached child. The child process resides in its own process group, has no controlling terminal attached and is cleaned up by the init process. Returns ``True`` for the parent and ``False`` for the child.
Below is the the instruction that describes the task: ### Input: Detach daemon process. Forks the current process into a parent and a detached child. The child process resides in its own process group, has no controlling terminal attached and is cleaned up by the init process. Returns ``True`` for the parent and ``False`` for the child. ### Response: def _detach_process(): """ Detach daemon process. Forks the current process into a parent and a detached child. The child process resides in its own process group, has no controlling terminal attached and is cleaned up by the init process. Returns ``True`` for the parent and ``False`` for the child. """ # To detach from our process group we need to call ``setsid``. We # can only do that if we aren't a process group leader. Therefore # we fork once, which makes sure that the new child process is not # a process group leader. pid = os.fork() if pid > 0: # Parent process # Use waitpid to "collect" the child process and avoid Zombies os.waitpid(pid, 0) return True os.setsid() # We now fork a second time and let the second's fork parent exit. # This makes the second fork's child process an orphan. Orphans are # cleaned up by the init process, so we won't end up with a zombie. # In addition, the second fork's child is no longer a session # leader and can therefore never acquire a controlling terminal. pid = os.fork() if pid > 0: os._exit(os.EX_OK) return False
def wait_spot_requests_active(ec2, requests, timeout=None, tentative=False): """ Wait until no spot request in the given iterator is in the 'open' state or, optionally, a timeout occurs. Yield spot requests as soon as they leave the 'open' state. :param Iterator[SpotInstanceRequest] requests: :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a timeout occurs, the remaining open requests will be cancelled. :param bool tentative: if True, give up on a spot request at the earliest indication of it not being fulfilled immediately :rtype: Iterator[list[SpotInstanceRequest]] """ if timeout is not None: timeout = time.time() + timeout active_ids = set() other_ids = set() open_ids = None def cancel(): log.warn('Cancelling remaining %i spot requests.', len(open_ids)) ec2.cancel_spot_instance_requests(list(open_ids)) def spot_request_not_found(e): error_code = 'InvalidSpotInstanceRequestID.NotFound' return isinstance(e, EC2ResponseError) and e.error_code == error_code try: while True: open_ids, eval_ids, fulfill_ids = set(), set(), set() batch = [] for r in requests: if r.state == 'open': open_ids.add(r.id) if r.status.code == 'pending-evaluation': eval_ids.add(r.id) elif r.status.code == 'pending-fulfillment': fulfill_ids.add(r.id) else: log.info( 'Request %s entered status %s indicating that it will not be ' 'fulfilled anytime soon.', r.id, r.status.code) elif r.state == 'active': assert r.id not in active_ids active_ids.add(r.id) batch.append(r) else: assert r.id not in other_ids other_ids.add(r.id) batch.append(r) if batch: yield batch log.info('%i spot requests(s) are open (%i of which are pending evaluation and %i ' 'are pending fulfillment), %i are active and %i are in another state.', *map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids))) if not open_ids or tentative and not eval_ids and not fulfill_ids: break sleep_time = 2 * a_short_time if timeout is not None and time.time() + sleep_time >= timeout: 
log.warn('Timed out waiting for spot requests.') break log.info('Sleeping for %is', sleep_time) time.sleep(sleep_time) for attempt in retry_ec2(retry_while=spot_request_not_found): with attempt: requests = ec2.get_all_spot_instance_requests( list(open_ids)) except BaseException: if open_ids: with panic(log): cancel() raise else: if open_ids: cancel()
Wait until no spot request in the given iterator is in the 'open' state or, optionally, a timeout occurs. Yield spot requests as soon as they leave the 'open' state. :param Iterator[SpotInstanceRequest] requests: :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a timeout occurs, the remaining open requests will be cancelled. :param bool tentative: if True, give up on a spot request at the earliest indication of it not being fulfilled immediately :rtype: Iterator[list[SpotInstanceRequest]]
Below is the the instruction that describes the task: ### Input: Wait until no spot request in the given iterator is in the 'open' state or, optionally, a timeout occurs. Yield spot requests as soon as they leave the 'open' state. :param Iterator[SpotInstanceRequest] requests: :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a timeout occurs, the remaining open requests will be cancelled. :param bool tentative: if True, give up on a spot request at the earliest indication of it not being fulfilled immediately :rtype: Iterator[list[SpotInstanceRequest]] ### Response: def wait_spot_requests_active(ec2, requests, timeout=None, tentative=False): """ Wait until no spot request in the given iterator is in the 'open' state or, optionally, a timeout occurs. Yield spot requests as soon as they leave the 'open' state. :param Iterator[SpotInstanceRequest] requests: :param float timeout: Maximum time in seconds to spend waiting or None to wait forever. If a timeout occurs, the remaining open requests will be cancelled. 
:param bool tentative: if True, give up on a spot request at the earliest indication of it not being fulfilled immediately :rtype: Iterator[list[SpotInstanceRequest]] """ if timeout is not None: timeout = time.time() + timeout active_ids = set() other_ids = set() open_ids = None def cancel(): log.warn('Cancelling remaining %i spot requests.', len(open_ids)) ec2.cancel_spot_instance_requests(list(open_ids)) def spot_request_not_found(e): error_code = 'InvalidSpotInstanceRequestID.NotFound' return isinstance(e, EC2ResponseError) and e.error_code == error_code try: while True: open_ids, eval_ids, fulfill_ids = set(), set(), set() batch = [] for r in requests: if r.state == 'open': open_ids.add(r.id) if r.status.code == 'pending-evaluation': eval_ids.add(r.id) elif r.status.code == 'pending-fulfillment': fulfill_ids.add(r.id) else: log.info( 'Request %s entered status %s indicating that it will not be ' 'fulfilled anytime soon.', r.id, r.status.code) elif r.state == 'active': assert r.id not in active_ids active_ids.add(r.id) batch.append(r) else: assert r.id not in other_ids other_ids.add(r.id) batch.append(r) if batch: yield batch log.info('%i spot requests(s) are open (%i of which are pending evaluation and %i ' 'are pending fulfillment), %i are active and %i are in another state.', *map(len, (open_ids, eval_ids, fulfill_ids, active_ids, other_ids))) if not open_ids or tentative and not eval_ids and not fulfill_ids: break sleep_time = 2 * a_short_time if timeout is not None and time.time() + sleep_time >= timeout: log.warn('Timed out waiting for spot requests.') break log.info('Sleeping for %is', sleep_time) time.sleep(sleep_time) for attempt in retry_ec2(retry_while=spot_request_not_found): with attempt: requests = ec2.get_all_spot_instance_requests( list(open_ids)) except BaseException: if open_ids: with panic(log): cancel() raise else: if open_ids: cancel()
def get_run_params(self) -> Dict: """Return the params to run the celery task.""" params = {} if self.celery_queue: params['queue'] = self.celery_queue if self.timeout: params['soft_time_limit'] = self.timeout # We set also a hard time limit that will send sig 9 # This hard time limit should not happened, as it will set inconsistent state params['time_limit'] = self.timeout + settings.CELERY_HARD_TIME_LIMIT_DELAY if self.execute_at: params['eta'] = self.execute_at return params
Return the params to run the celery task.
Below is the the instruction that describes the task: ### Input: Return the params to run the celery task. ### Response: def get_run_params(self) -> Dict: """Return the params to run the celery task.""" params = {} if self.celery_queue: params['queue'] = self.celery_queue if self.timeout: params['soft_time_limit'] = self.timeout # We set also a hard time limit that will send sig 9 # This hard time limit should not happened, as it will set inconsistent state params['time_limit'] = self.timeout + settings.CELERY_HARD_TIME_LIMIT_DELAY if self.execute_at: params['eta'] = self.execute_at return params
def humanTime(seconds): ''' Convert seconds to something more human-friendly ''' intervals = ['days', 'hours', 'minutes', 'seconds'] x = deltaTime(seconds=seconds) return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k))
Convert seconds to something more human-friendly
Below is the the instruction that describes the task: ### Input: Convert seconds to something more human-friendly ### Response: def humanTime(seconds): ''' Convert seconds to something more human-friendly ''' intervals = ['days', 'hours', 'minutes', 'seconds'] x = deltaTime(seconds=seconds) return ' '.join('{} {}'.format(getattr(x, k), k) for k in intervals if getattr(x, k))
def transform(self, X): ''' Transforms X according to the linear transformation corresponding to shifting the input eigenvalues to all be at least ``self.min_eig``. Parameters ---------- X : array, shape [n_test, n] The test similarities to training points. Returns ------- Xt : array, shape [n_test, n] The transformed test similarites to training points. Only different from X if X is the training data. ''' n = self.train_.shape[0] if X.ndim != 2 or X.shape[1] != n: msg = "X should have {} columns, the number of samples at fit time" raise TypeError(msg.format(n)) if self.copy: X = X.copy() if self.shift_ != 0 and X is self.train_ or ( X.shape == self.train_.shape and np.allclose(X, self.train_)): X[xrange(n), xrange(n)] += self.shift_ return X
Transforms X according to the linear transformation corresponding to shifting the input eigenvalues to all be at least ``self.min_eig``. Parameters ---------- X : array, shape [n_test, n] The test similarities to training points. Returns ------- Xt : array, shape [n_test, n] The transformed test similarites to training points. Only different from X if X is the training data.
Below is the the instruction that describes the task: ### Input: Transforms X according to the linear transformation corresponding to shifting the input eigenvalues to all be at least ``self.min_eig``. Parameters ---------- X : array, shape [n_test, n] The test similarities to training points. Returns ------- Xt : array, shape [n_test, n] The transformed test similarites to training points. Only different from X if X is the training data. ### Response: def transform(self, X): ''' Transforms X according to the linear transformation corresponding to shifting the input eigenvalues to all be at least ``self.min_eig``. Parameters ---------- X : array, shape [n_test, n] The test similarities to training points. Returns ------- Xt : array, shape [n_test, n] The transformed test similarites to training points. Only different from X if X is the training data. ''' n = self.train_.shape[0] if X.ndim != 2 or X.shape[1] != n: msg = "X should have {} columns, the number of samples at fit time" raise TypeError(msg.format(n)) if self.copy: X = X.copy() if self.shift_ != 0 and X is self.train_ or ( X.shape == self.train_.shape and np.allclose(X, self.train_)): X[xrange(n), xrange(n)] += self.shift_ return X
def show_key(kwargs=None, call=None): ''' List the keys available ''' if call != 'function': log.error( 'The list_keys function must be called with -f or --function.' ) return False if not kwargs: kwargs = {} if 'keyname' not in kwargs: log.error('A keyname is required.') return False rcode, data = query( command='my/keys/{0}'.format(kwargs['keyname']), method='GET', ) return {'keys': {data['name']: data['key']}}
List the keys available
Below is the the instruction that describes the task: ### Input: List the keys available ### Response: def show_key(kwargs=None, call=None): ''' List the keys available ''' if call != 'function': log.error( 'The list_keys function must be called with -f or --function.' ) return False if not kwargs: kwargs = {} if 'keyname' not in kwargs: log.error('A keyname is required.') return False rcode, data = query( command='my/keys/{0}'.format(kwargs['keyname']), method='GET', ) return {'keys': {data['name']: data['key']}}
def Cpig_coeffs(counts): r'''Computes the ideal-gas polynomial heat capacity coefficients of an organic compound using the Joback method as a function of chemical structure only. .. math:: C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2 + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3 288 compounds were used by Joback in this determination. No overall error was reported. The ideal gas heat capacity values used in developing the heat capacity polynomials used 9 data points between 298 K and 1000 K. Parameters ---------- counts : dict Dictionary of Joback groups present (numerically indexed) and their counts, [-] Returns ------- coefficients : list[float] Coefficients which will result in a calculated heat capacity in in units of J/mol/K, [-] Examples -------- >>> c = Joback.Cpig_coeffs({1: 2, 24: 1}) >>> c [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08] >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3 >>> Cp(300) 75.32642000000001 ''' a, b, c, d = 0.0, 0.0, 0.0, 0.0 for group, count in counts.items(): a += joback_groups_id_dict[group].Cpa*count b += joback_groups_id_dict[group].Cpb*count c += joback_groups_id_dict[group].Cpc*count d += joback_groups_id_dict[group].Cpd*count a -= 37.93 b += 0.210 c -= 3.91E-4 d += 2.06E-7 return [a, b, c, d]
r'''Computes the ideal-gas polynomial heat capacity coefficients of an organic compound using the Joback method as a function of chemical structure only. .. math:: C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2 + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3 288 compounds were used by Joback in this determination. No overall error was reported. The ideal gas heat capacity values used in developing the heat capacity polynomials used 9 data points between 298 K and 1000 K. Parameters ---------- counts : dict Dictionary of Joback groups present (numerically indexed) and their counts, [-] Returns ------- coefficients : list[float] Coefficients which will result in a calculated heat capacity in in units of J/mol/K, [-] Examples -------- >>> c = Joback.Cpig_coeffs({1: 2, 24: 1}) >>> c [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08] >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3 >>> Cp(300) 75.32642000000001
Below is the the instruction that describes the task: ### Input: r'''Computes the ideal-gas polynomial heat capacity coefficients of an organic compound using the Joback method as a function of chemical structure only. .. math:: C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2 + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3 288 compounds were used by Joback in this determination. No overall error was reported. The ideal gas heat capacity values used in developing the heat capacity polynomials used 9 data points between 298 K and 1000 K. Parameters ---------- counts : dict Dictionary of Joback groups present (numerically indexed) and their counts, [-] Returns ------- coefficients : list[float] Coefficients which will result in a calculated heat capacity in in units of J/mol/K, [-] Examples -------- >>> c = Joback.Cpig_coeffs({1: 2, 24: 1}) >>> c [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08] >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3 >>> Cp(300) 75.32642000000001 ### Response: def Cpig_coeffs(counts): r'''Computes the ideal-gas polynomial heat capacity coefficients of an organic compound using the Joback method as a function of chemical structure only. .. math:: C_p^{ig} = \sum_i a_i - 37.93 + \left[ \sum_i b_i + 0.210 \right] T + \left[ \sum_i c_i - 3.91 \cdot 10^{-4} \right] T^2 + \left[\sum_i d_i + 2.06 \cdot 10^{-7}\right] T^3 288 compounds were used by Joback in this determination. No overall error was reported. The ideal gas heat capacity values used in developing the heat capacity polynomials used 9 data points between 298 K and 1000 K. 
Parameters ---------- counts : dict Dictionary of Joback groups present (numerically indexed) and their counts, [-] Returns ------- coefficients : list[float] Coefficients which will result in a calculated heat capacity in in units of J/mol/K, [-] Examples -------- >>> c = Joback.Cpig_coeffs({1: 2, 24: 1}) >>> c [7.520000000000003, 0.26084, -0.0001207, 1.545999999999998e-08] >>> Cp = lambda T : c[0] + c[1]*T + c[2]*T**2 + c[3]*T**3 >>> Cp(300) 75.32642000000001 ''' a, b, c, d = 0.0, 0.0, 0.0, 0.0 for group, count in counts.items(): a += joback_groups_id_dict[group].Cpa*count b += joback_groups_id_dict[group].Cpb*count c += joback_groups_id_dict[group].Cpc*count d += joback_groups_id_dict[group].Cpd*count a -= 37.93 b += 0.210 c -= 3.91E-4 d += 2.06E-7 return [a, b, c, d]
def cmd_lsch(self): """lsch List the channels, showing the current one. """ names = list(self.fv.get_channel_names()) names.sort() if len(names) == 0: self.log("No channels") return res = [] cur_ch = self.fv.get_channel_info() for name in names: if (cur_ch is not None) and (cur_ch.name == name): res.append("=>%s" % (name)) else: res.append(" %s" % (name)) self.log("\n".join(res))
lsch List the channels, showing the current one.
Below is the the instruction that describes the task: ### Input: lsch List the channels, showing the current one. ### Response: def cmd_lsch(self): """lsch List the channels, showing the current one. """ names = list(self.fv.get_channel_names()) names.sort() if len(names) == 0: self.log("No channels") return res = [] cur_ch = self.fv.get_channel_info() for name in names: if (cur_ch is not None) and (cur_ch.name == name): res.append("=>%s" % (name)) else: res.append(" %s" % (name)) self.log("\n".join(res))
def set_zerg_client_params(self, server_sockets, use_fallback_socket=None): """Zerg mode. Zergs params. :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server. :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available """ self._set('zerg', server_sockets, multi=True) if use_fallback_socket is not None: self._set('zerg-fallback', use_fallback_socket, cast=bool) for socket in listify(server_sockets): self._section.networking.register_socket(self._section.networking.sockets.default(socket)) return self._section
Zerg mode. Zergs params. :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server. :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available
Below is the the instruction that describes the task: ### Input: Zerg mode. Zergs params. :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server. :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available ### Response: def set_zerg_client_params(self, server_sockets, use_fallback_socket=None): """Zerg mode. Zergs params. :param str|unicode|list[str|unicode] server_sockets: Attaches zerg to a zerg server. :param bool use_fallback_socket: Fallback to normal sockets if the zerg server is not available """ self._set('zerg', server_sockets, multi=True) if use_fallback_socket is not None: self._set('zerg-fallback', use_fallback_socket, cast=bool) for socket in listify(server_sockets): self._section.networking.register_socket(self._section.networking.sockets.default(socket)) return self._section
def plot_featured(*args, **kwargs): """ Wrapper for matplotlib.pyplot.plot() / errorbar(). Takes options: * 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here. * 'fig': figure to use. * 'figlabel': figure label. * 'legend': legend location. * 'toplabel': top label of plot. * 'xlabel': x-label of plot. * 'ylabel': y-label of plot. """ # Strip off options specific to plot_featured toplabel = kwargs.pop('toplabel', None) xlabel = kwargs.pop('xlabel', None) ylabel = kwargs.pop('ylabel', None) legend = kwargs.pop('legend', None) error = kwargs.pop('error', None) # save = kwargs.pop('save', False) figlabel = kwargs.pop('figlabel', None) fig = kwargs.pop('fig', None) if figlabel is not None: fig = _figure(figlabel) elif fig is None: try: fig = _plt.gcf() except: fig = _plt.fig() # Pass everything else to plot if error is None: _plt.plot(*args, **kwargs) else: _plt.errorbar(*args, **kwargs) # Format plot as desired _addlabel(toplabel, xlabel, ylabel, fig=fig) if legend is not None: _plt.legend(legend) return fig
Wrapper for matplotlib.pyplot.plot() / errorbar(). Takes options: * 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here. * 'fig': figure to use. * 'figlabel': figure label. * 'legend': legend location. * 'toplabel': top label of plot. * 'xlabel': x-label of plot. * 'ylabel': y-label of plot.
Below is the the instruction that describes the task: ### Input: Wrapper for matplotlib.pyplot.plot() / errorbar(). Takes options: * 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here. * 'fig': figure to use. * 'figlabel': figure label. * 'legend': legend location. * 'toplabel': top label of plot. * 'xlabel': x-label of plot. * 'ylabel': y-label of plot. ### Response: def plot_featured(*args, **kwargs): """ Wrapper for matplotlib.pyplot.plot() / errorbar(). Takes options: * 'error': if true, use :func:`matplotlib.pyplot.errorbar` instead of :func:`matplotlib.pyplot.plot`. *\*args* and *\*\*kwargs* passed through here. * 'fig': figure to use. * 'figlabel': figure label. * 'legend': legend location. * 'toplabel': top label of plot. * 'xlabel': x-label of plot. * 'ylabel': y-label of plot. """ # Strip off options specific to plot_featured toplabel = kwargs.pop('toplabel', None) xlabel = kwargs.pop('xlabel', None) ylabel = kwargs.pop('ylabel', None) legend = kwargs.pop('legend', None) error = kwargs.pop('error', None) # save = kwargs.pop('save', False) figlabel = kwargs.pop('figlabel', None) fig = kwargs.pop('fig', None) if figlabel is not None: fig = _figure(figlabel) elif fig is None: try: fig = _plt.gcf() except: fig = _plt.fig() # Pass everything else to plot if error is None: _plt.plot(*args, **kwargs) else: _plt.errorbar(*args, **kwargs) # Format plot as desired _addlabel(toplabel, xlabel, ylabel, fig=fig) if legend is not None: _plt.legend(legend) return fig
def supports_version_type(self, version_type): """Tests if the given version type is supported. arg: version_type (osid.type.Type): a version Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``VERSION`` raise: NullArgument - ``version_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``VERSION``']: raise errors.IllegalState() return version_type in self.get_version_types
Tests if the given version type is supported. arg: version_type (osid.type.Type): a version Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``VERSION`` raise: NullArgument - ``version_type`` is ``null`` *compliance: mandatory -- This method must be implemented.*
Below is the the instruction that describes the task: ### Input: Tests if the given version type is supported. arg: version_type (osid.type.Type): a version Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``VERSION`` raise: NullArgument - ``version_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* ### Response: def supports_version_type(self, version_type): """Tests if the given version type is supported. arg: version_type (osid.type.Type): a version Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``VERSION`` raise: NullArgument - ``version_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``VERSION``']: raise errors.IllegalState() return version_type in self.get_version_types
def save_config( self, cmd="copy running-configuration startup-configuration", confirm=False, confirm_response="", ): """Saves Config""" return super(DellForce10SSH, self).save_config( cmd=cmd, confirm=confirm, confirm_response=confirm_response )
Saves Config
Below is the the instruction that describes the task: ### Input: Saves Config ### Response: def save_config( self, cmd="copy running-configuration startup-configuration", confirm=False, confirm_response="", ): """Saves Config""" return super(DellForce10SSH, self).save_config( cmd=cmd, confirm=confirm, confirm_response=confirm_response )
def _broadcast_and_set_attrs(self, local_dict): """Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1 """ del local_dict['self'] self.remove_axis = False max_length = 0 for key in local_dict: try: length = len(local_dict[key]) if length > max_length: max_length = length except TypeError: pass if max_length == 0: self.remove_axis = True for key in local_dict: setattr(self, key, np.array([local_dict[key]])) # check for bad length arrays else: for key in local_dict: try: if len(local_dict[key]) < max_length and len(local_dict[key]) > 1: raise ValueError("Casting parameters not correct." + " Need all at a maximum shape and the rest being" + "len-1 arrays or scalars") except TypeError: pass # broadcast arrays for key in local_dict: try: if len(local_dict[key]) == max_length: setattr(self, key, local_dict[key]) elif len(local_dict[key]) == 1: setattr(self, key, np.full((max_length,), local_dict[key][0])) except TypeError: setattr(self, key, np.full((max_length,), local_dict[key])) return
Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1
Below is the the instruction that describes the task: ### Input: Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1 ### Response: def _broadcast_and_set_attrs(self, local_dict): """Cast all inputs to correct dimensions. This method fixes inputs who have different lengths. Namely one input as an array and others that are scalara or of len-1. Raises: Value Error: Multiple length arrays of len>1 """ del local_dict['self'] self.remove_axis = False max_length = 0 for key in local_dict: try: length = len(local_dict[key]) if length > max_length: max_length = length except TypeError: pass if max_length == 0: self.remove_axis = True for key in local_dict: setattr(self, key, np.array([local_dict[key]])) # check for bad length arrays else: for key in local_dict: try: if len(local_dict[key]) < max_length and len(local_dict[key]) > 1: raise ValueError("Casting parameters not correct." + " Need all at a maximum shape and the rest being" + "len-1 arrays or scalars") except TypeError: pass # broadcast arrays for key in local_dict: try: if len(local_dict[key]) == max_length: setattr(self, key, local_dict[key]) elif len(local_dict[key]) == 1: setattr(self, key, np.full((max_length,), local_dict[key][0])) except TypeError: setattr(self, key, np.full((max_length,), local_dict[key])) return
def inv_slots_preferred(self): """ List of all available inventory slots in the preferred search order. Does not include the additional slots from the open window. 1. active slot 2. remainder of the hotbar 3. remainder of the persistent inventory """ slots = [self.active_slot] slots.extend(slot for slot in self.window.hotbar_slots if slot != self.active_slot) slots.extend(self.window.inventory_slots) return slots
List of all available inventory slots in the preferred search order. Does not include the additional slots from the open window. 1. active slot 2. remainder of the hotbar 3. remainder of the persistent inventory
Below is the the instruction that describes the task: ### Input: List of all available inventory slots in the preferred search order. Does not include the additional slots from the open window. 1. active slot 2. remainder of the hotbar 3. remainder of the persistent inventory ### Response: def inv_slots_preferred(self): """ List of all available inventory slots in the preferred search order. Does not include the additional slots from the open window. 1. active slot 2. remainder of the hotbar 3. remainder of the persistent inventory """ slots = [self.active_slot] slots.extend(slot for slot in self.window.hotbar_slots if slot != self.active_slot) slots.extend(self.window.inventory_slots) return slots
def generate(self, x, **kwargs): """ Returns the graph for Fast Gradient Method adversarial examples. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` """ # Parse and save attack-specific parameters assert self.parse_params(**kwargs) labels, _nb_classes = self.get_or_guess_labels(x, kwargs) return fgm( x, self.model.get_logits(x), y=labels, eps=self.eps, ord=self.ord, clip_min=self.clip_min, clip_max=self.clip_max, targeted=(self.y_target is not None), sanity_checks=self.sanity_checks)
Returns the graph for Fast Gradient Method adversarial examples. :param x: The model's symbolic inputs. :param kwargs: See `parse_params`
Below is the the instruction that describes the task: ### Input: Returns the graph for Fast Gradient Method adversarial examples. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` ### Response: def generate(self, x, **kwargs): """ Returns the graph for Fast Gradient Method adversarial examples. :param x: The model's symbolic inputs. :param kwargs: See `parse_params` """ # Parse and save attack-specific parameters assert self.parse_params(**kwargs) labels, _nb_classes = self.get_or_guess_labels(x, kwargs) return fgm( x, self.model.get_logits(x), y=labels, eps=self.eps, ord=self.ord, clip_min=self.clip_min, clip_max=self.clip_max, targeted=(self.y_target is not None), sanity_checks=self.sanity_checks)
def setup(cls, client_id, client_secret): """Configure client in session """ cls.client_id = client_id cls.client_secret = client_secret
Configure client in session
Below is the the instruction that describes the task: ### Input: Configure client in session ### Response: def setup(cls, client_id, client_secret): """Configure client in session """ cls.client_id = client_id cls.client_secret = client_secret
def addNamespace(self, namespace, **context): """ Creates a new namespace within this database. :param namespace: <str> """ self.connection().addNamespace(namespace, orb.Context(**context))
Creates a new namespace within this database. :param namespace: <str>
Below is the the instruction that describes the task: ### Input: Creates a new namespace within this database. :param namespace: <str> ### Response: def addNamespace(self, namespace, **context): """ Creates a new namespace within this database. :param namespace: <str> """ self.connection().addNamespace(namespace, orb.Context(**context))
def FALSE(classical_reg): """ Produce a FALSE instruction. :param classical_reg: A classical register to modify. :return: An instruction object representing the equivalent MOVE. """ warn("`FALSE a` has been deprecated. Use `MOVE a 0` instead.") if isinstance(classical_reg, int): classical_reg = Addr(classical_reg) return MOVE(classical_reg, 0)
Produce a FALSE instruction. :param classical_reg: A classical register to modify. :return: An instruction object representing the equivalent MOVE.
Below is the the instruction that describes the task: ### Input: Produce a FALSE instruction. :param classical_reg: A classical register to modify. :return: An instruction object representing the equivalent MOVE. ### Response: def FALSE(classical_reg): """ Produce a FALSE instruction. :param classical_reg: A classical register to modify. :return: An instruction object representing the equivalent MOVE. """ warn("`FALSE a` has been deprecated. Use `MOVE a 0` instead.") if isinstance(classical_reg, int): classical_reg = Addr(classical_reg) return MOVE(classical_reg, 0)
def wait(self): """ Waits for the pool to be fully stopped """ while True: if not self.greenlet_watch: break if self.stopping: gevent.sleep(0.1) else: gevent.sleep(1)
Waits for the pool to be fully stopped
Below is the the instruction that describes the task: ### Input: Waits for the pool to be fully stopped ### Response: def wait(self): """ Waits for the pool to be fully stopped """ while True: if not self.greenlet_watch: break if self.stopping: gevent.sleep(0.1) else: gevent.sleep(1)
def parse(bin_payload): """ Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode) will not be present in bin_payload. """ message_hash = hexlify(bin_payload) if not is_hex( message_hash ): log.warning("Not a message hash") return None if len(message_hash) != 40: log.warning("Not a 160-bit hash") return None return { 'opcode': 'ANNOUNCE', 'message_hash': message_hash }
Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode) will not be present in bin_payload.
Below is the the instruction that describes the task: ### Input: Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode) will not be present in bin_payload. ### Response: def parse(bin_payload): """ Interpret a block's nulldata back into a SHA256. The first three bytes (2 magic + 1 opcode) will not be present in bin_payload. """ message_hash = hexlify(bin_payload) if not is_hex( message_hash ): log.warning("Not a message hash") return None if len(message_hash) != 40: log.warning("Not a 160-bit hash") return None return { 'opcode': 'ANNOUNCE', 'message_hash': message_hash }
def delete_contribution(self, url): """Delete the contribution with this identifier :rtype: bool :returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist) """ # first validate that this is a real contrib try: result = self.api_request(url) if 'url' in result and 'documents' in result: self.api_request(result['url'], method='DELETE') return True except: pass return False
Delete the contribution with this identifier :rtype: bool :returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist)
Below is the the instruction that describes the task: ### Input: Delete the contribution with this identifier :rtype: bool :returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist) ### Response: def delete_contribution(self, url): """Delete the contribution with this identifier :rtype: bool :returns: True if the contribution was deleted, False otherwise (eg. if it didn't exist) """ # first validate that this is a real contrib try: result = self.api_request(url) if 'url' in result and 'documents' in result: self.api_request(result['url'], method='DELETE') return True except: pass return False
def _resolve_squash_cache(self, client): """ Currently doing a "squash" basically negates the cache for any subsequent layers. But we can work around this by A) checking if the cache was successful for the _unsquashed_ version of the image, and B) if so, re-using an older squashed version of the image. Three ways to do this: 1. get the shas of the before/after images from `image.history` comments OR the output stream (or both). Both are extremely brittle, but also easy to access 2. Build the image without squash first. If the unsquashed image sha matches a cached one, substitute the unsuqashed image for the squashed one. If no match, re-run the steps with squash=True and store the resulting pair Less brittle than 1., but harder and defs not elegant 3. Use docker-squash as a dependency - this is by far the most preferable solution, except that they don't yet support the newest docker sdk version. Currently option 1 is implemented - we parse the comment string in the image history to figure out which layers the image was squashed from """ from .staging import BUILD_CACHEDIR history = client.api.history(self.buildname) comment = history[0].get('Comment', '').split() if len(comment) != 4 or comment[0] != 'merge' or comment[2] != 'to': print('WARNING: failed to parse this image\'s pre-squash history. ' 'The build will continue, but all subsequent layers will be rebuilt.') return squashed_sha = history[0]['Id'] start_squash_sha = comment[1] end_squash_sha = comment[3] cprint(' Layers %s to %s were squashed.' 
% (start_squash_sha, end_squash_sha), 'yellow') # check cache squashcache = os.path.join(BUILD_CACHEDIR, 'squashes') if not os.path.exists(squashcache): os.makedirs(squashcache) cachepath = os.path.join(BUILD_CACHEDIR, 'squashes', '%s-%s' % (start_squash_sha, end_squash_sha)) # on hit, tag the squashedsha as the result of this build step if os.path.exists(cachepath): self._get_squashed_layer_cache(client, squashed_sha, cachepath) else: self._cache_squashed_layer(squashed_sha, cachepath)
Currently doing a "squash" basically negates the cache for any subsequent layers. But we can work around this by A) checking if the cache was successful for the _unsquashed_ version of the image, and B) if so, re-using an older squashed version of the image. Three ways to do this: 1. get the shas of the before/after images from `image.history` comments OR the output stream (or both). Both are extremely brittle, but also easy to access 2. Build the image without squash first. If the unsquashed image sha matches a cached one, substitute the unsuqashed image for the squashed one. If no match, re-run the steps with squash=True and store the resulting pair Less brittle than 1., but harder and defs not elegant 3. Use docker-squash as a dependency - this is by far the most preferable solution, except that they don't yet support the newest docker sdk version. Currently option 1 is implemented - we parse the comment string in the image history to figure out which layers the image was squashed from
Below is the the instruction that describes the task: ### Input: Currently doing a "squash" basically negates the cache for any subsequent layers. But we can work around this by A) checking if the cache was successful for the _unsquashed_ version of the image, and B) if so, re-using an older squashed version of the image. Three ways to do this: 1. get the shas of the before/after images from `image.history` comments OR the output stream (or both). Both are extremely brittle, but also easy to access 2. Build the image without squash first. If the unsquashed image sha matches a cached one, substitute the unsuqashed image for the squashed one. If no match, re-run the steps with squash=True and store the resulting pair Less brittle than 1., but harder and defs not elegant 3. Use docker-squash as a dependency - this is by far the most preferable solution, except that they don't yet support the newest docker sdk version. Currently option 1 is implemented - we parse the comment string in the image history to figure out which layers the image was squashed from ### Response: def _resolve_squash_cache(self, client): """ Currently doing a "squash" basically negates the cache for any subsequent layers. But we can work around this by A) checking if the cache was successful for the _unsquashed_ version of the image, and B) if so, re-using an older squashed version of the image. Three ways to do this: 1. get the shas of the before/after images from `image.history` comments OR the output stream (or both). Both are extremely brittle, but also easy to access 2. Build the image without squash first. If the unsquashed image sha matches a cached one, substitute the unsuqashed image for the squashed one. If no match, re-run the steps with squash=True and store the resulting pair Less brittle than 1., but harder and defs not elegant 3. Use docker-squash as a dependency - this is by far the most preferable solution, except that they don't yet support the newest docker sdk version. 
Currently option 1 is implemented - we parse the comment string in the image history to figure out which layers the image was squashed from """ from .staging import BUILD_CACHEDIR history = client.api.history(self.buildname) comment = history[0].get('Comment', '').split() if len(comment) != 4 or comment[0] != 'merge' or comment[2] != 'to': print('WARNING: failed to parse this image\'s pre-squash history. ' 'The build will continue, but all subsequent layers will be rebuilt.') return squashed_sha = history[0]['Id'] start_squash_sha = comment[1] end_squash_sha = comment[3] cprint(' Layers %s to %s were squashed.' % (start_squash_sha, end_squash_sha), 'yellow') # check cache squashcache = os.path.join(BUILD_CACHEDIR, 'squashes') if not os.path.exists(squashcache): os.makedirs(squashcache) cachepath = os.path.join(BUILD_CACHEDIR, 'squashes', '%s-%s' % (start_squash_sha, end_squash_sha)) # on hit, tag the squashedsha as the result of this build step if os.path.exists(cachepath): self._get_squashed_layer_cache(client, squashed_sha, cachepath) else: self._cache_squashed_layer(squashed_sha, cachepath)
def discover(name, wait_for_s=60): """Discover a service by name Look for an advert to a named service:: address = nw0.discover("myservice") :param name: any text :param wait_for_s: how many seconds to wait before giving up :returns: the address found or None """ _start_beacon() # # It's possible to enter a deadlock situation where the first # process fires off a discovery request and waits for the # second process to advertise. But the second process has to # connect to the rpc port of the first process' beacon and # its advertisement is queued behind the pending discovery. # # To give both a chance of succeeding we operate in bursts, # allowing them to interleave. # t0 = time.time() while True: discovery = _rpc("discover", name, 0.5) if discovery: return discovery if timed_out(t0, wait_for_s): return None
Discover a service by name Look for an advert to a named service:: address = nw0.discover("myservice") :param name: any text :param wait_for_s: how many seconds to wait before giving up :returns: the address found or None
Below is the the instruction that describes the task: ### Input: Discover a service by name Look for an advert to a named service:: address = nw0.discover("myservice") :param name: any text :param wait_for_s: how many seconds to wait before giving up :returns: the address found or None ### Response: def discover(name, wait_for_s=60): """Discover a service by name Look for an advert to a named service:: address = nw0.discover("myservice") :param name: any text :param wait_for_s: how many seconds to wait before giving up :returns: the address found or None """ _start_beacon() # # It's possible to enter a deadlock situation where the first # process fires off a discovery request and waits for the # second process to advertise. But the second process has to # connect to the rpc port of the first process' beacon and # its advertisement is queued behind the pending discovery. # # To give both a chance of succeeding we operate in bursts, # allowing them to interleave. # t0 = time.time() while True: discovery = _rpc("discover", name, 0.5) if discovery: return discovery if timed_out(t0, wait_for_s): return None
def collect_bin_data(ruptures, sitecol, cmaker, iml4, truncation_level, n_epsilons, monitor=Monitor()): """ :param ruptures: a list of ruptures :param sitecol: a SiteCollection instance :param cmaker: a ContextMaker instance :param iml4: an ArrayWrapper of intensities of shape (N, R, M, P) :param truncation_level: the truncation level :param n_epsilons: the number of epsilons :param monitor: a Monitor instance :returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E) """ # NB: instantiating truncnorm is slow and calls the infamous "doccer" truncnorm = scipy.stats.truncnorm(-truncation_level, truncation_level) epsilons = numpy.linspace(truncnorm.a, truncnorm.b, n_epsilons + 1) acc = cmaker.disaggregate( sitecol, ruptures, iml4, truncnorm, epsilons, monitor) return pack(acc, 'mags dists lons lats'.split())
:param ruptures: a list of ruptures :param sitecol: a SiteCollection instance :param cmaker: a ContextMaker instance :param iml4: an ArrayWrapper of intensities of shape (N, R, M, P) :param truncation_level: the truncation level :param n_epsilons: the number of epsilons :param monitor: a Monitor instance :returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E)
Below is the the instruction that describes the task: ### Input: :param ruptures: a list of ruptures :param sitecol: a SiteCollection instance :param cmaker: a ContextMaker instance :param iml4: an ArrayWrapper of intensities of shape (N, R, M, P) :param truncation_level: the truncation level :param n_epsilons: the number of epsilons :param monitor: a Monitor instance :returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E) ### Response: def collect_bin_data(ruptures, sitecol, cmaker, iml4, truncation_level, n_epsilons, monitor=Monitor()): """ :param ruptures: a list of ruptures :param sitecol: a SiteCollection instance :param cmaker: a ContextMaker instance :param iml4: an ArrayWrapper of intensities of shape (N, R, M, P) :param truncation_level: the truncation level :param n_epsilons: the number of epsilons :param monitor: a Monitor instance :returns: a dictionary (poe, imt, rlzi) -> probabilities of shape (N, E) """ # NB: instantiating truncnorm is slow and calls the infamous "doccer" truncnorm = scipy.stats.truncnorm(-truncation_level, truncation_level) epsilons = numpy.linspace(truncnorm.a, truncnorm.b, n_epsilons + 1) acc = cmaker.disaggregate( sitecol, ruptures, iml4, truncnorm, epsilons, monitor) return pack(acc, 'mags dists lons lats'.split())
def parse_args(): """Parse arguments.""" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Diagnose script for checking the current system.') choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network'] for choice in choices: parser.add_argument('--' + choice, default=1, type=int, help='Diagnose {}.'.format(choice)) parser.add_argument('--region', default='', type=str, help="Additional sites in which region(s) to test. \ Specify 'cn' for example to test mirror sites in China.") parser.add_argument('--timeout', default=10, type=int, help="Connection test timeout threshold, 0 to disable.") args = parser.parse_args() return args
Parse arguments.
Below is the the instruction that describes the task: ### Input: Parse arguments. ### Response: def parse_args(): """Parse arguments.""" parser = argparse.ArgumentParser( formatter_class=argparse.ArgumentDefaultsHelpFormatter, description='Diagnose script for checking the current system.') choices = ['python', 'pip', 'mxnet', 'os', 'hardware', 'network'] for choice in choices: parser.add_argument('--' + choice, default=1, type=int, help='Diagnose {}.'.format(choice)) parser.add_argument('--region', default='', type=str, help="Additional sites in which region(s) to test. \ Specify 'cn' for example to test mirror sites in China.") parser.add_argument('--timeout', default=10, type=int, help="Connection test timeout threshold, 0 to disable.") args = parser.parse_args() return args
def args(parsed_args, name=None): """Interpret parsed args to streams""" strings = parsed_args.arg_strings(name) files = [s for s in strings if os.path.isfile(s)] if files: streams = [open(f) for f in files] else: streams = [] if getattr(parsed_args, 'paste', not files): streams.append(clipboard_stream()) if getattr(parsed_args, 'stdin', False): streams.append(sys.stdin) elif not streams: streams = [sys.stdin] return streams
Interpret parsed args to streams
Below is the the instruction that describes the task: ### Input: Interpret parsed args to streams ### Response: def args(parsed_args, name=None): """Interpret parsed args to streams""" strings = parsed_args.arg_strings(name) files = [s for s in strings if os.path.isfile(s)] if files: streams = [open(f) for f in files] else: streams = [] if getattr(parsed_args, 'paste', not files): streams.append(clipboard_stream()) if getattr(parsed_args, 'stdin', False): streams.append(sys.stdin) elif not streams: streams = [sys.stdin] return streams
def killCells(self, percent=0.05): """ Changes the percentage of cells that are now considered dead. The first time you call this method a permutation list is set up. Calls change the number of cells considered dead. """ numColumns = numpy.prod(self.getColumnDimensions()) if self.zombiePermutation is None: self.zombiePermutation = numpy.random.permutation(numColumns) self.numDead = int(round(percent * numColumns)) if self.numDead > 0: self.deadCols = self.zombiePermutation[0:self.numDead] else: self.deadCols = numpy.array([]) self.deadColumnInputSpan = self.getConnectedSpan(self.deadCols) self.removeDeadColumns()
Changes the percentage of cells that are now considered dead. The first time you call this method a permutation list is set up. Calls change the number of cells considered dead.
Below is the the instruction that describes the task: ### Input: Changes the percentage of cells that are now considered dead. The first time you call this method a permutation list is set up. Calls change the number of cells considered dead. ### Response: def killCells(self, percent=0.05): """ Changes the percentage of cells that are now considered dead. The first time you call this method a permutation list is set up. Calls change the number of cells considered dead. """ numColumns = numpy.prod(self.getColumnDimensions()) if self.zombiePermutation is None: self.zombiePermutation = numpy.random.permutation(numColumns) self.numDead = int(round(percent * numColumns)) if self.numDead > 0: self.deadCols = self.zombiePermutation[0:self.numDead] else: self.deadCols = numpy.array([]) self.deadColumnInputSpan = self.getConnectedSpan(self.deadCols) self.removeDeadColumns()
def post(self, request, hook_id): """ Process Kik webhook: 1. Get an enabled Kik bot 2. Verify Kik signature 3. Serialize each message 4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>` 5. Delay each message processing to a task 6. Response provider """ try: bot = caching.get_or_set(KikBot, hook_id) except KikBot.DoesNotExist: logger.warning("Hook id %s not associated to a bot" % hook_id) return Response(status=status.HTTP_404_NOT_FOUND) signature = request.META.get('HTTP_X_KIK_SIGNATURE') if signature: signature.encode('utf-8') if not bot._bot.verify_signature(signature, request.stream.body): logger.debug("Kik Bot data %s not verified %s" % (request.data, signature)) return Response(status=403) logger.debug("Kik Bot data %s verified" % (request.data)) for kik_message in request.data['messages']: serializer = KikMessageSerializer(data=kik_message) logger.debug("Kik message %s serialized" % (kik_message)) if serializer.is_valid(): try: if not self.accepted_types(serializer): raise OnlyTextMessages message = self.create_message(serializer, bot) if bot.enabled: logger.debug("Kik Bot %s attending request %s" % (bot, kik_message)) handle_message.delay(message.id, bot.id) else: logger.error("Message %s ignored by disabled bot %s" % (message, bot)) except OnlyTextMessages: logger.warning("Not text message %s for bot %s" % (kik_message, hook_id)) return Response(status=status.HTTP_200_OK) except: exc_info = sys.exc_info() traceback.print_exception(*exc_info) logger.error("Error processing %s for bot %s" % (kik_message, hook_id)) return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) else: logger.error("Validation error: %s from kik message %s" % (serializer.errors, kik_message)) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(serializer.data, status=status.HTTP_200_OK)
Process Kik webhook: 1. Get an enabled Kik bot 2. Verify Kik signature 3. Serialize each message 4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>` 5. Delay each message processing to a task 6. Response provider
Below is the the instruction that describes the task: ### Input: Process Kik webhook: 1. Get an enabled Kik bot 2. Verify Kik signature 3. Serialize each message 4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>` 5. Delay each message processing to a task 6. Response provider ### Response: def post(self, request, hook_id): """ Process Kik webhook: 1. Get an enabled Kik bot 2. Verify Kik signature 3. Serialize each message 4. For each message create :class:`KikMessage <permabots.models.kik_api.KikMessage>` and :class:`KikUser <permabots.models.kik_api.KikUser>` 5. Delay each message processing to a task 6. Response provider """ try: bot = caching.get_or_set(KikBot, hook_id) except KikBot.DoesNotExist: logger.warning("Hook id %s not associated to a bot" % hook_id) return Response(status=status.HTTP_404_NOT_FOUND) signature = request.META.get('HTTP_X_KIK_SIGNATURE') if signature: signature.encode('utf-8') if not bot._bot.verify_signature(signature, request.stream.body): logger.debug("Kik Bot data %s not verified %s" % (request.data, signature)) return Response(status=403) logger.debug("Kik Bot data %s verified" % (request.data)) for kik_message in request.data['messages']: serializer = KikMessageSerializer(data=kik_message) logger.debug("Kik message %s serialized" % (kik_message)) if serializer.is_valid(): try: if not self.accepted_types(serializer): raise OnlyTextMessages message = self.create_message(serializer, bot) if bot.enabled: logger.debug("Kik Bot %s attending request %s" % (bot, kik_message)) handle_message.delay(message.id, bot.id) else: logger.error("Message %s ignored by disabled bot %s" % (message, bot)) except OnlyTextMessages: logger.warning("Not text message %s for bot %s" % (kik_message, hook_id)) return Response(status=status.HTTP_200_OK) except: exc_info = sys.exc_info() traceback.print_exception(*exc_info) logger.error("Error processing %s for bot %s" % 
(kik_message, hook_id)) return Response(serializer.errors, status=status.HTTP_500_INTERNAL_SERVER_ERROR) else: logger.error("Validation error: %s from kik message %s" % (serializer.errors, kik_message)) return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST) return Response(serializer.data, status=status.HTTP_200_OK)
def on_close(self, evt): """ Pop-up menu and wx.EVT_CLOSE closing event """ self.stop() # DoseWatcher if evt.EventObject is not self: # Avoid deadlocks self.Close() # wx.Frame evt.Skip()
Pop-up menu and wx.EVT_CLOSE closing event
Below is the the instruction that describes the task: ### Input: Pop-up menu and wx.EVT_CLOSE closing event ### Response: def on_close(self, evt): """ Pop-up menu and wx.EVT_CLOSE closing event """ self.stop() # DoseWatcher if evt.EventObject is not self: # Avoid deadlocks self.Close() # wx.Frame evt.Skip()
def versatile_options(): """return list of options that can be changed at any time (not only be initialized), however the list might not be entirely up to date. The string ' #v ' in the default value indicates a 'versatile' option that can be changed any time. """ return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
return list of options that can be changed at any time (not only be initialized), however the list might not be entirely up to date. The string ' #v ' in the default value indicates a 'versatile' option that can be changed any time.
Below is the the instruction that describes the task: ### Input: return list of options that can be changed at any time (not only be initialized), however the list might not be entirely up to date. The string ' #v ' in the default value indicates a 'versatile' option that can be changed any time. ### Response: def versatile_options(): """return list of options that can be changed at any time (not only be initialized), however the list might not be entirely up to date. The string ' #v ' in the default value indicates a 'versatile' option that can be changed any time. """ return tuple(sorted(i[0] for i in list(CMAOptions.defaults().items()) if i[1].find(' #v ') > 0))
def _do_weak_search(self, obj, recursive): """Search for an element that looks like *obj* within the node list. This follows the same rules as :meth:`_do_strong_search` with some differences. *obj* is treated as a string that might represent any :class:`.Node`, :class:`.Wikicode`, or combination of the two present in the node list. Thus, matching is weak (using string comparisons) rather than strong (using ``is``). Because multiple nodes can match *obj*, the result is a list of tuples instead of just one (however, :exc:`ValueError` is still raised if nothing is found). Individual matches will never overlap. The tuples contain a new first element, *exact*, which is ``True`` if we were able to match *obj* exactly to one or more adjacent nodes, or ``False`` if we found *obj* inside a node or incompletely spanning multiple nodes. """ obj = parse_anything(obj) if not obj or obj not in self: raise ValueError(obj) results = [] contexts = [self] while contexts: context = contexts.pop() i = len(context.nodes) - 1 while i >= 0: node = context.get(i) if obj.get(-1) == node: for j in range(-len(obj.nodes), -1): if obj.get(j) != context.get(i + j + 1): break else: i -= len(obj.nodes) - 1 index = slice(i, i + len(obj.nodes)) results.append((True, context, index)) elif recursive and obj in node: contexts.extend(node.__children__()) i -= 1 if not results: if not recursive: raise ValueError(obj) results.append((False, self, slice(0, len(self.nodes)))) return results
Search for an element that looks like *obj* within the node list. This follows the same rules as :meth:`_do_strong_search` with some differences. *obj* is treated as a string that might represent any :class:`.Node`, :class:`.Wikicode`, or combination of the two present in the node list. Thus, matching is weak (using string comparisons) rather than strong (using ``is``). Because multiple nodes can match *obj*, the result is a list of tuples instead of just one (however, :exc:`ValueError` is still raised if nothing is found). Individual matches will never overlap. The tuples contain a new first element, *exact*, which is ``True`` if we were able to match *obj* exactly to one or more adjacent nodes, or ``False`` if we found *obj* inside a node or incompletely spanning multiple nodes.
Below is the the instruction that describes the task: ### Input: Search for an element that looks like *obj* within the node list. This follows the same rules as :meth:`_do_strong_search` with some differences. *obj* is treated as a string that might represent any :class:`.Node`, :class:`.Wikicode`, or combination of the two present in the node list. Thus, matching is weak (using string comparisons) rather than strong (using ``is``). Because multiple nodes can match *obj*, the result is a list of tuples instead of just one (however, :exc:`ValueError` is still raised if nothing is found). Individual matches will never overlap. The tuples contain a new first element, *exact*, which is ``True`` if we were able to match *obj* exactly to one or more adjacent nodes, or ``False`` if we found *obj* inside a node or incompletely spanning multiple nodes. ### Response: def _do_weak_search(self, obj, recursive): """Search for an element that looks like *obj* within the node list. This follows the same rules as :meth:`_do_strong_search` with some differences. *obj* is treated as a string that might represent any :class:`.Node`, :class:`.Wikicode`, or combination of the two present in the node list. Thus, matching is weak (using string comparisons) rather than strong (using ``is``). Because multiple nodes can match *obj*, the result is a list of tuples instead of just one (however, :exc:`ValueError` is still raised if nothing is found). Individual matches will never overlap. The tuples contain a new first element, *exact*, which is ``True`` if we were able to match *obj* exactly to one or more adjacent nodes, or ``False`` if we found *obj* inside a node or incompletely spanning multiple nodes. 
""" obj = parse_anything(obj) if not obj or obj not in self: raise ValueError(obj) results = [] contexts = [self] while contexts: context = contexts.pop() i = len(context.nodes) - 1 while i >= 0: node = context.get(i) if obj.get(-1) == node: for j in range(-len(obj.nodes), -1): if obj.get(j) != context.get(i + j + 1): break else: i -= len(obj.nodes) - 1 index = slice(i, i + len(obj.nodes)) results.append((True, context, index)) elif recursive and obj in node: contexts.extend(node.__children__()) i -= 1 if not results: if not recursive: raise ValueError(obj) results.append((False, self, slice(0, len(self.nodes)))) return results
def NewFromContent(cls, content, urn, chunk_size=1024, token=None, private_key=None, public_key=None): """Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written. """ aff4.FACTORY.Delete(urn, token=token) with data_store.DB.GetMutationPool() as pool: with aff4.FACTORY.Create( urn, cls, mode="w", mutation_pool=pool, token=token) as fd: for start_of_chunk in range(0, len(content), chunk_size): chunk = content[start_of_chunk:start_of_chunk + chunk_size] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, public_key) fd.Add(blob_rdf, mutation_pool=pool) return urn
Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written.
Below is the the instruction that describes the task: ### Input: Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written. ### Response: def NewFromContent(cls, content, urn, chunk_size=1024, token=None, private_key=None, public_key=None): """Alternate constructor for GRRSignedBlob. Creates a GRRSignedBlob from a content string by chunking it and signing each chunk. Args: content: The data to stored in the GRRSignedBlob. urn: The AFF4 URN to create. chunk_size: Data will be chunked into this size (each chunk is individually signed. token: The ACL Token. private_key: An rdf_crypto.RSAPrivateKey() instance. public_key: An rdf_crypto.RSAPublicKey() instance. Returns: the URN of the new object written. """ aff4.FACTORY.Delete(urn, token=token) with data_store.DB.GetMutationPool() as pool: with aff4.FACTORY.Create( urn, cls, mode="w", mutation_pool=pool, token=token) as fd: for start_of_chunk in range(0, len(content), chunk_size): chunk = content[start_of_chunk:start_of_chunk + chunk_size] blob_rdf = rdf_crypto.SignedBlob() blob_rdf.Sign(chunk, private_key, public_key) fd.Add(blob_rdf, mutation_pool=pool) return urn
def _ParseLeak( self, parser_mediator, cache_directories, msiecf_item, recovered=False): """Extract data from a MSIE Cache Files (MSIECF) leak item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache_directories (list[str]): cache directory names. msiecf_item (pymsiecf.leak): MSIECF leak item. recovered (Optional[bool]): True if the item was recovered. """ # TODO: add support for possible last cache synchronization date and time. date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = MSIECFLeakEventData() event_data.cached_filename = msiecf_item.filename event_data.cached_file_size = msiecf_item.cached_file_size event_data.cache_directory_index = msiecf_item.cache_directory_index event_data.offset = msiecf_item.offset event_data.recovered = recovered if (event_data.cache_directory_index >= 0 and event_data.cache_directory_index < len(cache_directories)): event_data.cache_directory_name = ( cache_directories[event_data.cache_directory_index]) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
Extract data from a MSIE Cache Files (MSIECF) leak item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache_directories (list[str]): cache directory names. msiecf_item (pymsiecf.leak): MSIECF leak item. recovered (Optional[bool]): True if the item was recovered.
Below is the the instruction that describes the task: ### Input: Extract data from a MSIE Cache Files (MSIECF) leak item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache_directories (list[str]): cache directory names. msiecf_item (pymsiecf.leak): MSIECF leak item. recovered (Optional[bool]): True if the item was recovered. ### Response: def _ParseLeak( self, parser_mediator, cache_directories, msiecf_item, recovered=False): """Extract data from a MSIE Cache Files (MSIECF) leak item. Every item is stored as an event object, one for each timestamp. Args: parser_mediator (ParserMediator): mediates interactions between parsers and other components, such as storage and dfvfs. cache_directories (list[str]): cache directory names. msiecf_item (pymsiecf.leak): MSIECF leak item. recovered (Optional[bool]): True if the item was recovered. """ # TODO: add support for possible last cache synchronization date and time. date_time = dfdatetime_semantic_time.SemanticTime('Not set') event_data = MSIECFLeakEventData() event_data.cached_filename = msiecf_item.filename event_data.cached_file_size = msiecf_item.cached_file_size event_data.cache_directory_index = msiecf_item.cache_directory_index event_data.offset = msiecf_item.offset event_data.recovered = recovered if (event_data.cache_directory_index >= 0 and event_data.cache_directory_index < len(cache_directories)): event_data.cache_directory_name = ( cache_directories[event_data.cache_directory_index]) event = time_events.DateTimeValuesEvent( date_time, definitions.TIME_DESCRIPTION_NOT_A_TIME) parser_mediator.ProduceEventWithEventData(event, event_data)
def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True): """ Returns the image and background position for use in a single shorthand property """ map = map.render() sprite_maps = _get_cache('sprite_maps') sprite_map = sprite_maps.get(map) sprite_name = String.unquoted(sprite).value sprite = sprite_map and sprite_map.get(sprite_name) if not sprite_map: log.error("No sprite map found: %s", map, extra={'stack': True}) elif not sprite: log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True}) if sprite: url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*']) if cache_buster: url += '?_=%s' % sprite_map['*t*'] x = Number(offset_x or 0, 'px') y = Number(offset_y or 0, 'px') if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'): x -= Number(sprite[2], 'px') if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'): y -= Number(sprite[3], 'px') url = "url(%s)" % escape(url) return List([String.unquoted(url), x, y]) return List([Number(0), Number(0)])
Returns the image and background position for use in a single shorthand property
Below is the the instruction that describes the task: ### Input: Returns the image and background position for use in a single shorthand property ### Response: def sprite(map, sprite, offset_x=None, offset_y=None, cache_buster=True): """ Returns the image and background position for use in a single shorthand property """ map = map.render() sprite_maps = _get_cache('sprite_maps') sprite_map = sprite_maps.get(map) sprite_name = String.unquoted(sprite).value sprite = sprite_map and sprite_map.get(sprite_name) if not sprite_map: log.error("No sprite map found: %s", map, extra={'stack': True}) elif not sprite: log.error("No sprite found: %s in %s", sprite_name, sprite_map['*n*'], extra={'stack': True}) if sprite: url = '%s%s' % (config.ASSETS_URL, sprite_map['*f*']) if cache_buster: url += '?_=%s' % sprite_map['*t*'] x = Number(offset_x or 0, 'px') y = Number(offset_y or 0, 'px') if not x.value or (x.value <= -1 or x.value >= 1) and not x.is_simple_unit('%'): x -= Number(sprite[2], 'px') if not y.value or (y.value <= -1 or y.value >= 1) and not y.is_simple_unit('%'): y -= Number(sprite[3], 'px') url = "url(%s)" % escape(url) return List([String.unquoted(url), x, y]) return List([Number(0), Number(0)])
def xml_to_metrics(xmlstr, object_type): '''Converts xml response to service bus metrics objects The xml format for MetricProperties <entry> <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id> <title/> <updated>2014-10-09T11:56:50Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Name>listeners.active</d:Name> <d:PrimaryAggregation>Average</d:PrimaryAggregation> <d:Unit>Count</d:Unit> <d:DisplayName>Active listeners</d:DisplayName> </m:properties> </content> </entry> The xml format for MetricValues <entry> <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id> <title/> <updated>2014-10-09T18:38:28Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp> <d:Min m:type="Edm.Int64">-118</d:Min> <d:Max m:type="Edm.Int64">15</d:Max> <d:Average m:type="Edm.Single">-78.44444</d:Average> <d:Total m:type="Edm.Int64">0</d:Total> </m:properties> </content> </entry> ''' xmldoc = minidom.parseString(xmlstr) return_obj = object_type() members = dict(vars(return_obj)) # Only one entry here for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc, 'entry'): for node in _MinidomXmlToObject.get_children_from_path(xml_entry, 'content', 'properties'): for name in members: xml_name = _get_serialization_name(name) children = _MinidomXmlToObject.get_child_nodes(node, xml_name) if not children: continue child = children[0] node_type = child.getAttributeNS("http://schemas.microsoft.com/ado/2007/08/dataservices/metadata", 'type') node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type) setattr(return_obj, name, node_value) for name, value in _MinidomXmlToObject.get_entry_properties_from_node( xml_entry, include_id=True, use_title_as_id=False).items(): if name in members: continue # Do not override if already members setattr(return_obj, name, value) return 
return_obj
Converts xml response to service bus metrics objects The xml format for MetricProperties <entry> <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id> <title/> <updated>2014-10-09T11:56:50Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Name>listeners.active</d:Name> <d:PrimaryAggregation>Average</d:PrimaryAggregation> <d:Unit>Count</d:Unit> <d:DisplayName>Active listeners</d:DisplayName> </m:properties> </content> </entry> The xml format for MetricValues <entry> <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id> <title/> <updated>2014-10-09T18:38:28Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp> <d:Min m:type="Edm.Int64">-118</d:Min> <d:Max m:type="Edm.Int64">15</d:Max> <d:Average m:type="Edm.Single">-78.44444</d:Average> <d:Total m:type="Edm.Int64">0</d:Total> </m:properties> </content> </entry>
Below is the the instruction that describes the task: ### Input: Converts xml response to service bus metrics objects The xml format for MetricProperties <entry> <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id> <title/> <updated>2014-10-09T11:56:50Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Name>listeners.active</d:Name> <d:PrimaryAggregation>Average</d:PrimaryAggregation> <d:Unit>Count</d:Unit> <d:DisplayName>Active listeners</d:DisplayName> </m:properties> </content> </entry> The xml format for MetricValues <entry> <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id> <title/> <updated>2014-10-09T18:38:28Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp> <d:Min m:type="Edm.Int64">-118</d:Min> <d:Max m:type="Edm.Int64">15</d:Max> <d:Average m:type="Edm.Single">-78.44444</d:Average> <d:Total m:type="Edm.Int64">0</d:Total> </m:properties> </content> </entry> ### Response: def xml_to_metrics(xmlstr, object_type): '''Converts xml response to service bus metrics objects The xml format for MetricProperties <entry> <id>https://sbgm.windows.net/Metrics(\'listeners.active\')</id> <title/> <updated>2014-10-09T11:56:50Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Name>listeners.active</d:Name> <d:PrimaryAggregation>Average</d:PrimaryAggregation> <d:Unit>Count</d:Unit> <d:DisplayName>Active listeners</d:DisplayName> </m:properties> </content> </entry> The xml format for MetricValues <entry> <id>https://sbgm.windows.net/MetricValues(datetime\'2014-10-02T00:00:00Z\')</id> <title/> <updated>2014-10-09T18:38:28Z</updated> <author> <name/> </author> <content type="application/xml"> <m:properties> <d:Timestamp m:type="Edm.DateTime">2014-10-02T00:00:00Z</d:Timestamp> <d:Min m:type="Edm.Int64">-118</d:Min> <d:Max m:type="Edm.Int64">15</d:Max> 
<d:Average m:type="Edm.Single">-78.44444</d:Average> <d:Total m:type="Edm.Int64">0</d:Total> </m:properties> </content> </entry> ''' xmldoc = minidom.parseString(xmlstr) return_obj = object_type() members = dict(vars(return_obj)) # Only one entry here for xml_entry in _MinidomXmlToObject.get_children_from_path(xmldoc, 'entry'): for node in _MinidomXmlToObject.get_children_from_path(xml_entry, 'content', 'properties'): for name in members: xml_name = _get_serialization_name(name) children = _MinidomXmlToObject.get_child_nodes(node, xml_name) if not children: continue child = children[0] node_type = child.getAttributeNS("http://schemas.microsoft.com/ado/2007/08/dataservices/metadata", 'type') node_value = _ServiceBusManagementXmlSerializer.odata_converter(child.firstChild.nodeValue, node_type) setattr(return_obj, name, node_value) for name, value in _MinidomXmlToObject.get_entry_properties_from_node( xml_entry, include_id=True, use_title_as_id=False).items(): if name in members: continue # Do not override if already members setattr(return_obj, name, value) return return_obj
def listen_dataset_events(self, owner_id, project_id, dataset_id): """ Authenticate to start using dataset events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) url = '/users/%s/projects/%s/datasets/%s/start_websocket_session' response = self.get(url % (owner_id, project_id, dataset_id)) websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'datasetid': dataset_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
Authenticate to start using dataset events.
Below is the instruction that describes the task: ### Input: Authenticate to start using dataset events. ### Response: def listen_dataset_events(self, owner_id, project_id, dataset_id): """ Authenticate to start using dataset events. """ if not self._user_id: raise AmigoCloudError(self.error_msg['logged_in_websockets']) url = '/users/%s/projects/%s/datasets/%s/start_websocket_session' response = self.get(url % (owner_id, project_id, dataset_id)) websocket_session = response['websocket_session'] auth_data = {'userid': self._user_id, 'datasetid': dataset_id, 'websocket_session': websocket_session} self.amigosocket.emit('authenticate', auth_data)
def _get_action_from_method_and_request_uri(self, method, request_uri): """basically used for `rest-json` APIs You can refer to example from link below https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json """ # service response class should have 'SERVICE_NAME' class member, # if you want to get action from method and url if not hasattr(self, 'SERVICE_NAME'): return None service = self.SERVICE_NAME conn = boto3.client(service, region_name=self.region) # make cache if it does not exist yet if not hasattr(self, 'method_urls'): self.method_urls = defaultdict(lambda: defaultdict(str)) op_names = conn._service_model.operation_names for op_name in op_names: op_model = conn._service_model.operation_model(op_name) _method = op_model.http['method'] uri_regexp = self.uri_to_regexp(op_model.http['requestUri']) self.method_urls[_method][uri_regexp] = op_model.name regexp_and_names = self.method_urls[method] for regexp, name in regexp_and_names.items(): match = re.match(regexp, request_uri) self.uri_match = match if match: return name return None
basically used for `rest-json` APIs You can refer to example from link below https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json
Below is the the instruction that describes the task: ### Input: basically used for `rest-json` APIs You can refer to example from link below https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json ### Response: def _get_action_from_method_and_request_uri(self, method, request_uri): """basically used for `rest-json` APIs You can refer to example from link below https://github.com/boto/botocore/blob/develop/botocore/data/iot/2015-05-28/service-2.json """ # service response class should have 'SERVICE_NAME' class member, # if you want to get action from method and url if not hasattr(self, 'SERVICE_NAME'): return None service = self.SERVICE_NAME conn = boto3.client(service, region_name=self.region) # make cache if it does not exist yet if not hasattr(self, 'method_urls'): self.method_urls = defaultdict(lambda: defaultdict(str)) op_names = conn._service_model.operation_names for op_name in op_names: op_model = conn._service_model.operation_model(op_name) _method = op_model.http['method'] uri_regexp = self.uri_to_regexp(op_model.http['requestUri']) self.method_urls[_method][uri_regexp] = op_model.name regexp_and_names = self.method_urls[method] for regexp, name in regexp_and_names.items(): match = re.match(regexp, request_uri) self.uri_match = match if match: return name return None
def perform(self): """ Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method. """ if self.use_https: conn = client.HTTPSConnection(self.host, self.port) else: conn = client.HTTPConnection(self.host, self.port) conn.request(self.method, self.uri) response = conn.getresponse() conn.close() return bool(response.status >= 200 and response.status < 300)
Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method.
Below is the instruction that describes the task: ### Input: Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method. ### Response: def perform(self): """ Performs a simple HTTP request against the configured url and returns true if the response has a 2xx code. The url can be configured to use https via the "https" boolean flag in the config, as well as a custom HTTP method via the "method" key. The default is to not use https and the GET method. """ if self.use_https: conn = client.HTTPSConnection(self.host, self.port) else: conn = client.HTTPConnection(self.host, self.port) conn.request(self.method, self.uri) response = conn.getresponse() conn.close() return bool(response.status >= 200 and response.status < 300)
def delete_tag_for_component(user, c_id, tag_id): """Delete a tag on a specific component.""" # Todo : check c_id and tag_id exist in db query = _TABLE_TAGS.delete().where(_TABLE_TAGS.c.tag_id == tag_id and _TABLE_TAGS.c.component_id == c_id) try: flask.g.db_conn.execute(query) except sa_exc.IntegrityError: raise dci_exc.DCICreationConflict(_TABLE_TAGS.c.tag_id, 'tag_id') return flask.Response(None, 204, content_type='application/json')
Delete a tag on a specific component.
Below is the instruction that describes the task: ### Input: Delete a tag on a specific component. ### Response: def delete_tag_for_component(user, c_id, tag_id): """Delete a tag on a specific component.""" # Todo : check c_id and tag_id exist in db query = _TABLE_TAGS.delete().where(_TABLE_TAGS.c.tag_id == tag_id and _TABLE_TAGS.c.component_id == c_id) try: flask.g.db_conn.execute(query) except sa_exc.IntegrityError: raise dci_exc.DCICreationConflict(_TABLE_TAGS.c.tag_id, 'tag_id') return flask.Response(None, 204, content_type='application/json')
def basic_auth_required(view_func): """ A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. """ @wraps(view_func) def wrapper(*args, **kwargs): if app.config.get('BASIC_AUTH_ACTIVE', False): if basic_auth.authenticate(): return view_func(*args, **kwargs) else: return basic_auth.challenge() else: return view_func(*args, **kwargs) return wrapper
A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars.
Below is the instruction that describes the task: ### Input: A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. ### Response: def basic_auth_required(view_func): """ A decorator that can be used to protect specific views with HTTP basic access authentication. Conditional on having BASIC_AUTH_USERNAME and BASIC_AUTH_PASSWORD set as env vars. """ @wraps(view_func) def wrapper(*args, **kwargs): if app.config.get('BASIC_AUTH_ACTIVE', False): if basic_auth.authenticate(): return view_func(*args, **kwargs) else: return basic_auth.challenge() else: return view_func(*args, **kwargs) return wrapper
def van_enc_2d(x, first_depth, reuse=False): """The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image. """ with tf.variable_scope('van_enc', reuse=reuse): a = 4 # depends on the inputs size b = 4 # a, b = 4,4 enc = tf.nn.relu(x) enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu) enc = tf.contrib.layers.layer_norm(enc) enc = tf.reshape(enc, [-1, a, b, first_depth]) enc = tf.layers.conv2d_transpose( enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d_transpose( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=2) van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2]) enc = tf.layers.conv2d_transpose( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d_transpose( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4]) van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1) return enc, van_higher_level
The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image.
Below is the the instruction that describes the task: ### Input: The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image. ### Response: def van_enc_2d(x, first_depth, reuse=False): """The higher level structure encoder for the VAN. The high level structure is a vector instead of an image. Args: x: The higher level structure to encode. first_depth: The depth of the first layer. Depth is increased in subsequent layers. reuse: To reuse in variable scope or not. Returns: The encoded image. """ with tf.variable_scope('van_enc', reuse=reuse): a = 4 # depends on the inputs size b = 4 # a, b = 4,4 enc = tf.nn.relu(x) enc = tf.layers.dense(enc, first_depth * a * b, tf.nn.relu) enc = tf.contrib.layers.layer_norm(enc) enc = tf.reshape(enc, [-1, a, b, first_depth]) enc = tf.layers.conv2d_transpose( enc, first_depth, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d_transpose( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=2) van_higher_level_2 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 2]) enc = tf.layers.conv2d_transpose( enc, first_depth * 2, 3, padding='same', activation=tf.nn.relu, strides=1) enc = tf.contrib.layers.layer_norm(enc) enc = tf.layers.conv2d_transpose( enc, first_depth * 4, 3, padding='same', activation=tf.nn.relu, strides=1) van_higher_level_4 = tf.reshape(enc, [-1, a * 2 * b * 2 * first_depth * 4]) van_higher_level = tf.concat([x, van_higher_level_2, van_higher_level_4], 1) return enc, van_higher_level
def _categorize_successor(self, state): """ Append state into successor lists. :param state: a SimState instance :param target: The target (of the jump/call/ret) :return: The state """ self.all_successors.append(state) target = state.scratch.target # categorize the state if o.APPROXIMATE_GUARDS in state.options and state.solver.is_false(state.scratch.guard, exact=False): if o.VALIDATE_APPROXIMATIONS in state.options: if state.satisfiable(): raise Exception('WTF') self.unsat_successors.append(state) elif o.APPROXIMATE_SATISFIABILITY in state.options and not state.solver.satisfiable(exact=False): if o.VALIDATE_APPROXIMATIONS in state.options: if state.solver.satisfiable(): raise Exception('WTF') self.unsat_successors.append(state) elif not state.scratch.guard.symbolic and state.solver.is_false(state.scratch.guard): self.unsat_successors.append(state) elif o.LAZY_SOLVES not in state.options and not state.satisfiable(): self.unsat_successors.append(state) elif o.NO_SYMBOLIC_JUMP_RESOLUTION in state.options and state.solver.symbolic(target): self.unconstrained_successors.append(state) elif not state.solver.symbolic(target) and not state.history.jumpkind.startswith("Ijk_Sys"): # a successor with a concrete IP, and it's not a syscall self.successors.append(state) self.flat_successors.append(state) elif state.history.jumpkind.startswith("Ijk_Sys"): # syscall self.successors.append(state) # Misuse the ip_at_syscall register to save the return address for this syscall # state.ip *might be* changed to be the real address of syscall SimProcedures by syscall handling code in # angr state.regs.ip_at_syscall = state.ip try: symbolic_syscall_num, concrete_syscall_nums = self._resolve_syscall(state) if concrete_syscall_nums is not None: for i, n in enumerate(concrete_syscall_nums): split_state = state if i == len(concrete_syscall_nums) - 1 else state.copy() split_state.add_constraints(symbolic_syscall_num == n) if split_state.supports_inspect: split_state.inspect.downsize() 
self._fix_syscall_ip(split_state) self.flat_successors.append(split_state) else: # We cannot resolve the syscall number # However, we still put it to the flat_successors list, and angr.SimOS.handle_syscall will pick it # up, and create a "unknown syscall" stub for it. self._fix_syscall_ip(state) self.flat_successors.append(state) except AngrUnsupportedSyscallError: self.unsat_successors.append(state) else: # a successor with a symbolic IP _max_targets = state.options.symbolic_ip_max_targets _max_jumptable_targets = state.options.jumptable_symbolic_ip_max_targets try: if o.NO_IP_CONCRETIZATION in state.options: # Don't try to concretize the IP cond_and_targets = [ (claripy.true, target) ] max_targets = 0 elif o.KEEP_IP_SYMBOLIC in state.options: s = claripy.Solver() addrs = s.eval(target, _max_targets + 1, extra_constraints=tuple(state.ip_constraints)) if len(addrs) > _max_targets: # It is not a library l.debug("It is not a Library") addrs = state.solver.eval_upto(target, _max_targets + 1) l.debug("addrs :%s", addrs) cond_and_targets = [ (target == addr, addr) for addr in addrs ] max_targets = _max_targets else: cond_and_targets = self._eval_target_jumptable(state, target, _max_jumptable_targets + 1) if cond_and_targets is None: # Fallback to the traditional and slow method cond_and_targets = self._eval_target_brutal(state, target, _max_targets + 1) max_targets = _max_targets else: max_targets = _max_jumptable_targets if len(cond_and_targets) > max_targets: l.warning( "Exit state has over %d possible solutions. Likely unconstrained; skipping. 
%s", max_targets, target.shallow_repr() ) self.unconstrained_successors.append(state) else: for cond, a in cond_and_targets: split_state = state.copy() if o.KEEP_IP_SYMBOLIC in split_state.options: split_state.regs.ip = target else: split_state.add_constraints(cond, action=True) split_state.regs.ip = a if split_state.supports_inspect: split_state.inspect.downsize() self.flat_successors.append(split_state) self.successors.append(state) except SimSolverModeError: self.unsat_successors.append(state) return state
Append state into successor lists. :param state: a SimState instance :param target: The target (of the jump/call/ret) :return: The state
Below is the the instruction that describes the task: ### Input: Append state into successor lists. :param state: a SimState instance :param target: The target (of the jump/call/ret) :return: The state ### Response: def _categorize_successor(self, state): """ Append state into successor lists. :param state: a SimState instance :param target: The target (of the jump/call/ret) :return: The state """ self.all_successors.append(state) target = state.scratch.target # categorize the state if o.APPROXIMATE_GUARDS in state.options and state.solver.is_false(state.scratch.guard, exact=False): if o.VALIDATE_APPROXIMATIONS in state.options: if state.satisfiable(): raise Exception('WTF') self.unsat_successors.append(state) elif o.APPROXIMATE_SATISFIABILITY in state.options and not state.solver.satisfiable(exact=False): if o.VALIDATE_APPROXIMATIONS in state.options: if state.solver.satisfiable(): raise Exception('WTF') self.unsat_successors.append(state) elif not state.scratch.guard.symbolic and state.solver.is_false(state.scratch.guard): self.unsat_successors.append(state) elif o.LAZY_SOLVES not in state.options and not state.satisfiable(): self.unsat_successors.append(state) elif o.NO_SYMBOLIC_JUMP_RESOLUTION in state.options and state.solver.symbolic(target): self.unconstrained_successors.append(state) elif not state.solver.symbolic(target) and not state.history.jumpkind.startswith("Ijk_Sys"): # a successor with a concrete IP, and it's not a syscall self.successors.append(state) self.flat_successors.append(state) elif state.history.jumpkind.startswith("Ijk_Sys"): # syscall self.successors.append(state) # Misuse the ip_at_syscall register to save the return address for this syscall # state.ip *might be* changed to be the real address of syscall SimProcedures by syscall handling code in # angr state.regs.ip_at_syscall = state.ip try: symbolic_syscall_num, concrete_syscall_nums = self._resolve_syscall(state) if concrete_syscall_nums is not None: for i, n in 
enumerate(concrete_syscall_nums): split_state = state if i == len(concrete_syscall_nums) - 1 else state.copy() split_state.add_constraints(symbolic_syscall_num == n) if split_state.supports_inspect: split_state.inspect.downsize() self._fix_syscall_ip(split_state) self.flat_successors.append(split_state) else: # We cannot resolve the syscall number # However, we still put it to the flat_successors list, and angr.SimOS.handle_syscall will pick it # up, and create a "unknown syscall" stub for it. self._fix_syscall_ip(state) self.flat_successors.append(state) except AngrUnsupportedSyscallError: self.unsat_successors.append(state) else: # a successor with a symbolic IP _max_targets = state.options.symbolic_ip_max_targets _max_jumptable_targets = state.options.jumptable_symbolic_ip_max_targets try: if o.NO_IP_CONCRETIZATION in state.options: # Don't try to concretize the IP cond_and_targets = [ (claripy.true, target) ] max_targets = 0 elif o.KEEP_IP_SYMBOLIC in state.options: s = claripy.Solver() addrs = s.eval(target, _max_targets + 1, extra_constraints=tuple(state.ip_constraints)) if len(addrs) > _max_targets: # It is not a library l.debug("It is not a Library") addrs = state.solver.eval_upto(target, _max_targets + 1) l.debug("addrs :%s", addrs) cond_and_targets = [ (target == addr, addr) for addr in addrs ] max_targets = _max_targets else: cond_and_targets = self._eval_target_jumptable(state, target, _max_jumptable_targets + 1) if cond_and_targets is None: # Fallback to the traditional and slow method cond_and_targets = self._eval_target_brutal(state, target, _max_targets + 1) max_targets = _max_targets else: max_targets = _max_jumptable_targets if len(cond_and_targets) > max_targets: l.warning( "Exit state has over %d possible solutions. Likely unconstrained; skipping. 
%s", max_targets, target.shallow_repr() ) self.unconstrained_successors.append(state) else: for cond, a in cond_and_targets: split_state = state.copy() if o.KEEP_IP_SYMBOLIC in split_state.options: split_state.regs.ip = target else: split_state.add_constraints(cond, action=True) split_state.regs.ip = a if split_state.supports_inspect: split_state.inspect.downsize() self.flat_successors.append(split_state) self.successors.append(state) except SimSolverModeError: self.unsat_successors.append(state) return state
def _csr_to_sdf(self): """ Inverse of _sdf_to_csr(). """ self.data = pd.DataFrame(self.data['values'].todense(), index=self.data['index'], columns=self.data['columns']).to_sparse()
Inverse of _sdf_to_csr().
Below is the the instruction that describes the task: ### Input: Inverse of _sdf_to_csr(). ### Response: def _csr_to_sdf(self): """ Inverse of _sdf_to_csr(). """ self.data = pd.DataFrame(self.data['values'].todense(), index=self.data['index'], columns=self.data['columns']).to_sparse()
def getNumberTLD(): """ Counting the total number of TLD being processed. """ total = 0 for typeTld in TLD.keys(): total+= len(TLD[typeTld]) return total
Counting the total number of TLD being processed.
Below is the the instruction that describes the task: ### Input: Counting the total number of TLD being processed. ### Response: def getNumberTLD(): """ Counting the total number of TLD being processed. """ total = 0 for typeTld in TLD.keys(): total+= len(TLD[typeTld]) return total
def match(self, context, line): """Match code lines prefixed with a variety of keywords.""" return line.kind == 'code' and line.partitioned[0] in self._both
Match code lines prefixed with a variety of keywords.
Below is the the instruction that describes the task: ### Input: Match code lines prefixed with a variety of keywords. ### Response: def match(self, context, line): """Match code lines prefixed with a variety of keywords.""" return line.kind == 'code' and line.partitioned[0] in self._both
def should_send(self, request): """Returns whether or not the request should be sent to the modules, based on the filters.""" if self.filters.get('whitelist', None): return request.tree.type in self.filters['whitelist'] elif self.filters.get('blacklist', None): return request.tree.type not in self.filters['blacklist'] else: return True
Returns whether or not the request should be sent to the modules, based on the filters.
Below is the the instruction that describes the task: ### Input: Returns whether or not the request should be sent to the modules, based on the filters. ### Response: def should_send(self, request): """Returns whether or not the request should be sent to the modules, based on the filters.""" if self.filters.get('whitelist', None): return request.tree.type in self.filters['whitelist'] elif self.filters.get('blacklist', None): return request.tree.type not in self.filters['blacklist'] else: return True
def find_one(self, collection, query): """ Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results. """ obj = getattr(self.db, collection) result = obj.find_one(query) return result
Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results.
Below is the the instruction that describes the task: ### Input: Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results. ### Response: def find_one(self, collection, query): """ Search a collection for the query provided and return one result. Just a raw interface to mongo to do any query you want. Args: collection: The db collection. See main class documentation. query: A mongo find query. Returns: pymongo Cursor object with the results. """ obj = getattr(self.db, collection) result = obj.find_one(query) return result
def handle_api_exception(f): """ A decorator to catch superset exceptions. Use it after the @api decorator above so superset exception handler is triggered before the handler for generic exceptions. """ def wraps(self, *args, **kwargs): try: return f(self, *args, **kwargs) except SupersetSecurityException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), status=e.status, stacktrace=traceback.format_exc(), link=e.link) except SupersetException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc(), status=e.status) except Exception as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc()) return functools.update_wrapper(wraps, f)
A decorator to catch superset exceptions. Use it after the @api decorator above so superset exception handler is triggered before the handler for generic exceptions.
Below is the the instruction that describes the task: ### Input: A decorator to catch superset exceptions. Use it after the @api decorator above so superset exception handler is triggered before the handler for generic exceptions. ### Response: def handle_api_exception(f): """ A decorator to catch superset exceptions. Use it after the @api decorator above so superset exception handler is triggered before the handler for generic exceptions. """ def wraps(self, *args, **kwargs): try: return f(self, *args, **kwargs) except SupersetSecurityException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), status=e.status, stacktrace=traceback.format_exc(), link=e.link) except SupersetException as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc(), status=e.status) except Exception as e: logging.exception(e) return json_error_response(utils.error_msg_from_exception(e), stacktrace=traceback.format_exc()) return functools.update_wrapper(wraps, f)
def getContextsForExpression(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0): """Get semantic contexts for the input expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Context Raises: CorticalioException: if the request was not successful """ return self._expressions.getContextsForExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity)
Get semantic contexts for the input expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Context Raises: CorticalioException: if the request was not successful
Below is the the instruction that describes the task: ### Input: Get semantic contexts for the input expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Context Raises: CorticalioException: if the request was not successful ### Response: def getContextsForExpression(self, body, getFingerprint=None, startIndex=0, maxResults=5, sparsity=1.0): """Get semantic contexts for the input expression Args: body, ExpressionOperation: The JSON encoded expression to be evaluated (required) getFingerprint, bool: Configure if the fingerprint should be returned as part of the results (optional) startIndex, int: The start-index for pagination (optional) maxResults, int: Max results per page (optional) sparsity, float: Sparsify the resulting expression to this percentage (optional) Returns: list of Context Raises: CorticalioException: if the request was not successful """ return self._expressions.getContextsForExpression(self._retina, body, getFingerprint, startIndex, maxResults, sparsity)
def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_retrieval_strategy' ) and self.document_retrieval_strategy is not None: _dict[ 'document_retrieval_strategy'] = self.document_retrieval_strategy return _dict
Return a json dictionary representing this model.
Below is the the instruction that describes the task: ### Input: Return a json dictionary representing this model. ### Response: def _to_dict(self): """Return a json dictionary representing this model.""" _dict = {} if hasattr(self, 'document_retrieval_strategy' ) and self.document_retrieval_strategy is not None: _dict[ 'document_retrieval_strategy'] = self.document_retrieval_strategy return _dict
def _validate_int(name, value, limits=(), strip='%'): ''' Validate the named integer within the supplied limits inclusive and strip supplied unit characters ''' comment = '' # Must be integral try: if isinstance(value, string_types): value = value.strip(' ' + strip) value = int(value) except (TypeError, ValueError): comment += '{0} must be an integer '.format(name) # Must be in range else: if len(limits) == 2: if value < limits[0] or value > limits[1]: comment += '{0} must be in the range [{1[0]}, {1[1]}] '.format(name, limits) return value, comment
Validate the named integer within the supplied limits inclusive and strip supplied unit characters
Below is the the instruction that describes the task: ### Input: Validate the named integer within the supplied limits inclusive and strip supplied unit characters ### Response: def _validate_int(name, value, limits=(), strip='%'): ''' Validate the named integer within the supplied limits inclusive and strip supplied unit characters ''' comment = '' # Must be integral try: if isinstance(value, string_types): value = value.strip(' ' + strip) value = int(value) except (TypeError, ValueError): comment += '{0} must be an integer '.format(name) # Must be in range else: if len(limits) == 2: if value < limits[0] or value > limits[1]: comment += '{0} must be in the range [{1[0]}, {1[1]}] '.format(name, limits) return value, comment
def build_command(self, action, args=None): """Build a SOAP request. Args: action (str): the name of an action (a string as specified in the service description XML file) to be sent. args (list, optional): Relevant arguments as a list of (name, value) tuples. Returns: tuple: a tuple containing the POST headers (as a dict) and a string containing the relevant SOAP body. Does not set content-length, or host headers, which are completed upon sending. """ # A complete request should look something like this: # POST path of control URL HTTP/1.1 # HOST: host of control URL:port of control URL # CONTENT-LENGTH: bytes in body # CONTENT-TYPE: text/xml; charset="utf-8" # SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName" # # <?xml version="1.0"?> # <s:Envelope # xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" # s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> # <s:Body> # <u:actionName # xmlns:u="urn:schemas-upnp-org:service:serviceType:v"> # <argumentName>in arg value</argumentName> # ... other in args and their values go here, if any # </u:actionName> # </s:Body> # </s:Envelope> arguments = self.wrap_arguments(args) body = self.soap_body_template.format( arguments=arguments, action=action, service_type=self.service_type, version=self.version) soap_action_template = \ "urn:schemas-upnp-org:service:{service_type}:{version}#{action}" soap_action = soap_action_template.format( service_type=self.service_type, version=self.version, action=action) headers = {'Content-Type': 'text/xml; charset="utf-8"', 'SOAPACTION': soap_action} # Note that although we set the charset to utf-8 here, in fact the # body is still unicode. It will only be converted to bytes when it # is set over the network return (headers, body)
Build a SOAP request. Args: action (str): the name of an action (a string as specified in the service description XML file) to be sent. args (list, optional): Relevant arguments as a list of (name, value) tuples. Returns: tuple: a tuple containing the POST headers (as a dict) and a string containing the relevant SOAP body. Does not set content-length, or host headers, which are completed upon sending.
Below is the the instruction that describes the task: ### Input: Build a SOAP request. Args: action (str): the name of an action (a string as specified in the service description XML file) to be sent. args (list, optional): Relevant arguments as a list of (name, value) tuples. Returns: tuple: a tuple containing the POST headers (as a dict) and a string containing the relevant SOAP body. Does not set content-length, or host headers, which are completed upon sending. ### Response: def build_command(self, action, args=None): """Build a SOAP request. Args: action (str): the name of an action (a string as specified in the service description XML file) to be sent. args (list, optional): Relevant arguments as a list of (name, value) tuples. Returns: tuple: a tuple containing the POST headers (as a dict) and a string containing the relevant SOAP body. Does not set content-length, or host headers, which are completed upon sending. """ # A complete request should look something like this: # POST path of control URL HTTP/1.1 # HOST: host of control URL:port of control URL # CONTENT-LENGTH: bytes in body # CONTENT-TYPE: text/xml; charset="utf-8" # SOAPACTION: "urn:schemas-upnp-org:service:serviceType:v#actionName" # # <?xml version="1.0"?> # <s:Envelope # xmlns:s="http://schemas.xmlsoap.org/soap/envelope/" # s:encodingStyle="http://schemas.xmlsoap.org/soap/encoding/"> # <s:Body> # <u:actionName # xmlns:u="urn:schemas-upnp-org:service:serviceType:v"> # <argumentName>in arg value</argumentName> # ... 
other in args and their values go here, if any # </u:actionName> # </s:Body> # </s:Envelope> arguments = self.wrap_arguments(args) body = self.soap_body_template.format( arguments=arguments, action=action, service_type=self.service_type, version=self.version) soap_action_template = \ "urn:schemas-upnp-org:service:{service_type}:{version}#{action}" soap_action = soap_action_template.format( service_type=self.service_type, version=self.version, action=action) headers = {'Content-Type': 'text/xml; charset="utf-8"', 'SOAPACTION': soap_action} # Note that although we set the charset to utf-8 here, in fact the # body is still unicode. It will only be converted to bytes when it # is set over the network return (headers, body)
def request(self, method, path, query=None, content=None): """ Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. @type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned """ if not path.startswith("/"): raise ClientError("Implementation error: Called with bad path %s" % path) body = None if content is not None: data = self._json_encoder.encode(content) body = StringProducer(data) url = self._base_url + path if query: prepare_query(query) params = urlencode(query, doseq=True) url += "?%s" % params log.msg("Sending request to %s %s %s" % (url, self.headers, body), system="Gentleman") d = self._agent.request(method, url, headers=self.headers, bodyProducer=body) protocol = JsonResponseProtocol(d) @d.addErrback def connectionFailed(failure): failure.trap(ConnectionRefusedError) raise GanetiApiError("Connection refused!") @d.addCallback def cb(response): if response.code != 200: raise NotOkayError(code=response.code) response.deliverBody(protocol) return protocol.getData()
Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. @type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned
Below is the the instruction that describes the task: ### Input: Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. @type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned ### Response: def request(self, method, path, query=None, content=None): """ Sends an HTTP request. This constructs a full URL, encodes and decodes HTTP bodies, and handles invalid responses in a pythonic way. @type method: string @param method: HTTP method to use @type path: string @param path: HTTP URL path @type query: list of two-tuples @param query: query arguments to pass to urllib.urlencode @type content: str or None @param content: HTTP body content @rtype: object @return: JSON-Decoded response @raises GanetiApiError: If an invalid response is returned """ if not path.startswith("/"): raise ClientError("Implementation error: Called with bad path %s" % path) body = None if content is not None: data = self._json_encoder.encode(content) body = StringProducer(data) url = self._base_url + path if query: prepare_query(query) params = urlencode(query, doseq=True) url += "?%s" % params log.msg("Sending request to %s %s %s" % (url, self.headers, body), system="Gentleman") d = self._agent.request(method, url, headers=self.headers, bodyProducer=body) protocol = JsonResponseProtocol(d) @d.addErrback def connectionFailed(failure): failure.trap(ConnectionRefusedError) raise GanetiApiError("Connection refused!") @d.addCallback def cb(response): if response.code != 200: raise NotOkayError(code=response.code) response.deliverBody(protocol) return protocol.getData()
def register_view(self, view_class, *args, **kwargs): """Register an admin view on this admin instance. :param view_class: The view class name passed to the view factory. :param args: Positional arugments for view class. :param kwargs: Keyword arguments to view class. """ protected_view_class = self.view_class_factory(view_class) if 'endpoint' not in kwargs: kwargs['endpoint'] = view_class(*args, **kwargs).endpoint self.admin.add_view(protected_view_class(*args, **kwargs))
Register an admin view on this admin instance. :param view_class: The view class name passed to the view factory. :param args: Positional arugments for view class. :param kwargs: Keyword arguments to view class.
Below is the the instruction that describes the task: ### Input: Register an admin view on this admin instance. :param view_class: The view class name passed to the view factory. :param args: Positional arugments for view class. :param kwargs: Keyword arguments to view class. ### Response: def register_view(self, view_class, *args, **kwargs): """Register an admin view on this admin instance. :param view_class: The view class name passed to the view factory. :param args: Positional arugments for view class. :param kwargs: Keyword arguments to view class. """ protected_view_class = self.view_class_factory(view_class) if 'endpoint' not in kwargs: kwargs['endpoint'] = view_class(*args, **kwargs).endpoint self.admin.add_view(protected_view_class(*args, **kwargs))
def writeInfo(self, location=None, masters=None): """ Write font into the current instance. Note: the masters attribute is ignored at the moment. """ if self.currentInstance is None: return infoElement = ET.Element("info") if location is not None: locationElement = self._makeLocationElement(location) infoElement.append(locationElement) self.currentInstance.append(infoElement)
Write font into the current instance. Note: the masters attribute is ignored at the moment.
Below is the the instruction that describes the task: ### Input: Write font into the current instance. Note: the masters attribute is ignored at the moment. ### Response: def writeInfo(self, location=None, masters=None): """ Write font into the current instance. Note: the masters attribute is ignored at the moment. """ if self.currentInstance is None: return infoElement = ET.Element("info") if location is not None: locationElement = self._makeLocationElement(location) infoElement.append(locationElement) self.currentInstance.append(infoElement)
def save_voxel_grid(voxel_grid, file_name): """ Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str """ try: with open(file_name, 'wb') as fp: for voxel in voxel_grid: fp.write(struct.pack("<I", voxel)) except IOError as e: print("An error occurred: {}".format(e.args[-1])) raise e except Exception: raise
Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str
Below is the the instruction that describes the task: ### Input: Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str ### Response: def save_voxel_grid(voxel_grid, file_name): """ Saves binary voxel grid as a binary file. The binary file is structured in little-endian unsigned int format. :param voxel_grid: binary voxel grid :type voxel_grid: list, tuple :param file_name: file name to save :type file_name: str """ try: with open(file_name, 'wb') as fp: for voxel in voxel_grid: fp.write(struct.pack("<I", voxel)) except IOError as e: print("An error occurred: {}".format(e.args[-1])) raise e except Exception: raise
def process_ubam(bam, **kwargs): """Extracting metrics from unaligned bam format Extracting lengths """ logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam)) samfile = pysam.AlignmentFile(bam, "rb", check_sq=False) if not samfile.has_index(): pysam.index(bam) # Need to reload the samfile after creating index samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") datadf = pd.DataFrame( data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length) for read in samfile.fetch(until_eof=True)], columns=["readIDs", "quals", "lengths"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: ubam {} contains {} reads.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
Extracting metrics from unaligned bam format Extracting lengths
Below is the the instruction that describes the task: ### Input: Extracting metrics from unaligned bam format Extracting lengths ### Response: def process_ubam(bam, **kwargs): """Extracting metrics from unaligned bam format Extracting lengths """ logging.info("Nanoget: Starting to collect statistics from ubam file {}.".format(bam)) samfile = pysam.AlignmentFile(bam, "rb", check_sq=False) if not samfile.has_index(): pysam.index(bam) # Need to reload the samfile after creating index samfile = pysam.AlignmentFile(bam, "rb") logging.info("Nanoget: No index for bam file could be found, created index.") datadf = pd.DataFrame( data=[(read.query_name, nanomath.ave_qual(read.query_qualities), read.query_length) for read in samfile.fetch(until_eof=True)], columns=["readIDs", "quals", "lengths"]) \ .dropna(axis='columns', how='all') \ .dropna(axis='index', how='any') logging.info("Nanoget: ubam {} contains {} reads.".format( bam, datadf["lengths"].size)) return ut.reduce_memory_usage(datadf)
def _binary_search(f, xmin, xmax, eps=1e-9): """Return the largest x such f(x) is True.""" middle = (xmax + xmin) / 2. while xmax - xmin > eps: assert xmin < xmax middle = (xmax + xmin) / 2. if f(xmax): return xmax if not f(xmin): return xmin if f(middle): xmin = middle else: xmax = middle return middle
Return the largest x such f(x) is True.
Below is the the instruction that describes the task: ### Input: Return the largest x such f(x) is True. ### Response: def _binary_search(f, xmin, xmax, eps=1e-9): """Return the largest x such f(x) is True.""" middle = (xmax + xmin) / 2. while xmax - xmin > eps: assert xmin < xmax middle = (xmax + xmin) / 2. if f(xmax): return xmax if not f(xmin): return xmin if f(middle): xmin = middle else: xmax = middle return middle
def _loadService(self, servicePath): """ Check if an application service can be found at the specified path. If found, instantiate it and add it to the application service pool. :param: <str> service file path :return: <void> """ serviceName = ntpath.basename(servicePath).replace(".py", "") # importing service serviceSpec = importlib.util.spec_from_file_location( serviceName, servicePath ) service = importlib.util.module_from_spec(serviceSpec) serviceSpec.loader.exec_module(service) # checking if there is a service in the file if hasattr(service, "Service"): # instantiate the service serviceInstance = service.Service(self.application) self.application.addService( self.name, serviceName, serviceInstance )
Check if an application service can be found at the specified path. If found, instantiate it and add it to the application service pool. :param: <str> service file path :return: <void>
Below is the the instruction that describes the task: ### Input: Check if an application service can be found at the specified path. If found, instantiate it and add it to the application service pool. :param: <str> service file path :return: <void> ### Response: def _loadService(self, servicePath): """ Check if an application service can be found at the specified path. If found, instantiate it and add it to the application service pool. :param: <str> service file path :return: <void> """ serviceName = ntpath.basename(servicePath).replace(".py", "") # importing service serviceSpec = importlib.util.spec_from_file_location( serviceName, servicePath ) service = importlib.util.module_from_spec(serviceSpec) serviceSpec.loader.exec_module(service) # checking if there is a service in the file if hasattr(service, "Service"): # instantiate the service serviceInstance = service.Service(self.application) self.application.addService( self.name, serviceName, serviceInstance )
def pushmem(self, data, size): """ Push block of memory to front of message, as a new frame. Returns 0 on success, -1 on error. """ return lib.zmsg_pushmem(self._as_parameter_, data, size)
Push block of memory to front of message, as a new frame. Returns 0 on success, -1 on error.
Below is the the instruction that describes the task: ### Input: Push block of memory to front of message, as a new frame. Returns 0 on success, -1 on error. ### Response: def pushmem(self, data, size): """ Push block of memory to front of message, as a new frame. Returns 0 on success, -1 on error. """ return lib.zmsg_pushmem(self._as_parameter_, data, size)
def parse_uniprot_xml_metadata(sr): """Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord. Returns: dict: All parsed information """ # TODO: What about "reviewed" status? and EC number xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq'] infodict = {} infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id])) infodict['gene_name'] = None if 'gene_name_primary' in sr.annotations: infodict['gene_name'] = sr.annotations['gene_name_primary'] infodict['description'] = sr.description infodict['taxonomy'] = None if 'organism' in sr.annotations: infodict['taxonomy'] = sr.annotations['organism'] infodict['seq_version'] = sr.annotations['sequence_version'] infodict['seq_date'] = sr.annotations['sequence_modified'] infodict['entry_version'] = sr.annotations['version'] infodict['entry_date'] = sr.annotations['modified'] tmp = defaultdict(list) for xref in sr.dbxrefs: database = xref.split(':', 1)[0] xrefs = xref.split(':', 1)[-1] if database in xref_dbs_to_keep: if database == 'PDB': tmp['pdbs'].append(xrefs) else: tmp[database.lower()].append(xrefs) infodict.update(tmp) return infodict
Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord. Returns: dict: All parsed information
Below is the the instruction that describes the task: ### Input: Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord. Returns: dict: All parsed information ### Response: def parse_uniprot_xml_metadata(sr): """Load relevant attributes and dbxrefs from a parsed UniProt XML file in a SeqRecord. Returns: dict: All parsed information """ # TODO: What about "reviewed" status? and EC number xref_dbs_to_keep = ['GO', 'KEGG', 'PDB', 'PROSITE', 'Pfam', 'RefSeq'] infodict = {} infodict['alt_uniprots'] = list(set(sr.annotations['accessions']).difference([sr.id])) infodict['gene_name'] = None if 'gene_name_primary' in sr.annotations: infodict['gene_name'] = sr.annotations['gene_name_primary'] infodict['description'] = sr.description infodict['taxonomy'] = None if 'organism' in sr.annotations: infodict['taxonomy'] = sr.annotations['organism'] infodict['seq_version'] = sr.annotations['sequence_version'] infodict['seq_date'] = sr.annotations['sequence_modified'] infodict['entry_version'] = sr.annotations['version'] infodict['entry_date'] = sr.annotations['modified'] tmp = defaultdict(list) for xref in sr.dbxrefs: database = xref.split(':', 1)[0] xrefs = xref.split(':', 1)[-1] if database in xref_dbs_to_keep: if database == 'PDB': tmp['pdbs'].append(xrefs) else: tmp[database.lower()].append(xrefs) infodict.update(tmp) return infodict
def allDayForDate(self,this_date,timeZone=None): ''' This method determines whether the occurrence lasts the entirety of a specified day in the specified time zone. If no time zone is specified, then it uses the default time zone). Also, give a grace period of a few minutes to account for issues with the way events are sometimes entered. ''' if isinstance(this_date,datetime): d = this_date.date() else: d = this_date date_start = datetime(d.year,d.month,d.day) naive_start = self.startTime if timezone.is_naive(self.startTime) else timezone.make_naive(self.startTime, timezone=timeZone) naive_end = self.endTime if timezone.is_naive(self.endTime) else timezone.make_naive(self.endTime, timezone=timeZone) return ( # Ensure that all comparisons are done in local time naive_start <= date_start and naive_end >= date_start + timedelta(days=1,minutes=-30) )
This method determines whether the occurrence lasts the entirety of a specified day in the specified time zone. If no time zone is specified, then it uses the default time zone). Also, give a grace period of a few minutes to account for issues with the way events are sometimes entered.
Below is the the instruction that describes the task: ### Input: This method determines whether the occurrence lasts the entirety of a specified day in the specified time zone. If no time zone is specified, then it uses the default time zone). Also, give a grace period of a few minutes to account for issues with the way events are sometimes entered. ### Response: def allDayForDate(self,this_date,timeZone=None): ''' This method determines whether the occurrence lasts the entirety of a specified day in the specified time zone. If no time zone is specified, then it uses the default time zone). Also, give a grace period of a few minutes to account for issues with the way events are sometimes entered. ''' if isinstance(this_date,datetime): d = this_date.date() else: d = this_date date_start = datetime(d.year,d.month,d.day) naive_start = self.startTime if timezone.is_naive(self.startTime) else timezone.make_naive(self.startTime, timezone=timeZone) naive_end = self.endTime if timezone.is_naive(self.endTime) else timezone.make_naive(self.endTime, timezone=timeZone) return ( # Ensure that all comparisons are done in local time naive_start <= date_start and naive_end >= date_start + timedelta(days=1,minutes=-30) )
def register (self, target): """ Registers a new virtual target. Checks if there's already registered target, with the same name, type, project and subvariant properties, and also with the same sources and equal action. If such target is found it is retured and 'target' is not registered. Otherwise, 'target' is registered and returned. """ assert isinstance(target, VirtualTarget) if target.path(): signature = target.path() + "-" + target.name() else: signature = "-" + target.name() result = None if signature not in self.cache_: self.cache_ [signature] = [] for t in self.cache_ [signature]: a1 = t.action () a2 = target.action () # TODO: why are we checking for not result? if not result: if not a1 and not a2: result = t else: if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources (): ps1 = a1.properties () ps2 = a2.properties () p1 = ps1.base () + ps1.free () +\ b2.util.set.difference(ps1.dependency(), ps1.incidental()) p2 = ps2.base () + ps2.free () +\ b2.util.set.difference(ps2.dependency(), ps2.incidental()) if p1 == p2: result = t if not result: self.cache_ [signature].append (target) result = target # TODO: Don't append if we found pre-existing target? self.recent_targets_.append(result) self.all_targets_.append(result) return result
Registers a new virtual target. Checks if there's already registered target, with the same name, type, project and subvariant properties, and also with the same sources and equal action. If such target is found it is retured and 'target' is not registered. Otherwise, 'target' is registered and returned.
Below is the the instruction that describes the task: ### Input: Registers a new virtual target. Checks if there's already registered target, with the same name, type, project and subvariant properties, and also with the same sources and equal action. If such target is found it is retured and 'target' is not registered. Otherwise, 'target' is registered and returned. ### Response: def register (self, target): """ Registers a new virtual target. Checks if there's already registered target, with the same name, type, project and subvariant properties, and also with the same sources and equal action. If such target is found it is retured and 'target' is not registered. Otherwise, 'target' is registered and returned. """ assert isinstance(target, VirtualTarget) if target.path(): signature = target.path() + "-" + target.name() else: signature = "-" + target.name() result = None if signature not in self.cache_: self.cache_ [signature] = [] for t in self.cache_ [signature]: a1 = t.action () a2 = target.action () # TODO: why are we checking for not result? if not result: if not a1 and not a2: result = t else: if a1 and a2 and a1.action_name () == a2.action_name () and a1.sources () == a2.sources (): ps1 = a1.properties () ps2 = a2.properties () p1 = ps1.base () + ps1.free () +\ b2.util.set.difference(ps1.dependency(), ps1.incidental()) p2 = ps2.base () + ps2.free () +\ b2.util.set.difference(ps2.dependency(), ps2.incidental()) if p1 == p2: result = t if not result: self.cache_ [signature].append (target) result = target # TODO: Don't append if we found pre-existing target? self.recent_targets_.append(result) self.all_targets_.append(result) return result
def csep_periodic(ra, rb, L): """Return separation vectors between each pair of the two sets of points. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- csep: float array-like, shape (n, m, d) csep[i, j] is the separation vector from point j to point i. Note the un-intuitive vector direction. """ seps = ra[:, np.newaxis, :] - rb[np.newaxis, :, :] for i_dim in range(ra.shape[1]): seps_dim = seps[:, :, i_dim] seps_dim[seps_dim > L[i_dim] / 2.0] -= L[i_dim] seps_dim[seps_dim < -L[i_dim] / 2.0] += L[i_dim] return seps
Return separation vectors between each pair of the two sets of points. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- csep: float array-like, shape (n, m, d) csep[i, j] is the separation vector from point j to point i. Note the un-intuitive vector direction.
Below is the the instruction that describes the task: ### Input: Return separation vectors between each pair of the two sets of points. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- csep: float array-like, shape (n, m, d) csep[i, j] is the separation vector from point j to point i. Note the un-intuitive vector direction. ### Response: def csep_periodic(ra, rb, L): """Return separation vectors between each pair of the two sets of points. Parameters ---------- ra, rb: float array-like, shape (n, d) and (m, d) in d dimensions. Two sets of points. L: float array, shape (d,) System lengths. Returns ------- csep: float array-like, shape (n, m, d) csep[i, j] is the separation vector from point j to point i. Note the un-intuitive vector direction. """ seps = ra[:, np.newaxis, :] - rb[np.newaxis, :, :] for i_dim in range(ra.shape[1]): seps_dim = seps[:, :, i_dim] seps_dim[seps_dim > L[i_dim] / 2.0] -= L[i_dim] seps_dim[seps_dim < -L[i_dim] / 2.0] += L[i_dim] return seps
def decode_key(cls, pubkey_content): """Decode base64 coded part of the key.""" try: decoded_key = base64.b64decode(pubkey_content.encode("ascii")) except (TypeError, binascii.Error): raise MalformedDataError("Unable to decode the key") return decoded_key
Decode base64 coded part of the key.
Below is the the instruction that describes the task: ### Input: Decode base64 coded part of the key. ### Response: def decode_key(cls, pubkey_content): """Decode base64 coded part of the key.""" try: decoded_key = base64.b64decode(pubkey_content.encode("ascii")) except (TypeError, binascii.Error): raise MalformedDataError("Unable to decode the key") return decoded_key
def move_in_8(library, session, space, offset, length, extended=False): """Moves an 8-bit block of data from the specified address space and offset to local memory. Corresponds to viMoveIn8* functions of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :param extended: Use 64 bits offset independent of the platform. :return: Data read from the bus, return value of the library call. :rtype: list, :class:`pyvisa.constants.StatusCode` """ buffer_8 = (ViUInt8 * length)() if extended: ret = library.viMoveIn8Ex(session, space, offset, length, buffer_8) else: ret = library.viMoveIn8(session, space, offset, length, buffer_8) return list(buffer_8), ret
Moves an 8-bit block of data from the specified address space and offset to local memory. Corresponds to viMoveIn8* functions of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :param extended: Use 64 bits offset independent of the platform. :return: Data read from the bus, return value of the library call. :rtype: list, :class:`pyvisa.constants.StatusCode`
Below is the the instruction that describes the task: ### Input: Moves an 8-bit block of data from the specified address space and offset to local memory. Corresponds to viMoveIn8* functions of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :param extended: Use 64 bits offset independent of the platform. :return: Data read from the bus, return value of the library call. :rtype: list, :class:`pyvisa.constants.StatusCode` ### Response: def move_in_8(library, session, space, offset, length, extended=False): """Moves an 8-bit block of data from the specified address space and offset to local memory. Corresponds to viMoveIn8* functions of the VISA library. :param library: the visa library wrapped by ctypes. :param session: Unique logical identifier to a session. :param space: Specifies the address space. (Constants.*SPACE*) :param offset: Offset (in bytes) of the address or register from which to read. :param length: Number of elements to transfer, where the data width of the elements to transfer is identical to the source data width. :param extended: Use 64 bits offset independent of the platform. :return: Data read from the bus, return value of the library call. :rtype: list, :class:`pyvisa.constants.StatusCode` """ buffer_8 = (ViUInt8 * length)() if extended: ret = library.viMoveIn8Ex(session, space, offset, length, buffer_8) else: ret = library.viMoveIn8(session, space, offset, length, buffer_8) return list(buffer_8), ret
def get_full_dir_name(self): """ Function returns a full dir name """ return os.path.join(self.dir_name.get_text(), self.entry_project_name.get_text())
Function returns a full dir name
Below is the the instruction that describes the task: ### Input: Function returns a full dir name ### Response: def get_full_dir_name(self): """ Function returns a full dir name """ return os.path.join(self.dir_name.get_text(), self.entry_project_name.get_text())
def dm_to_sd(dm): ''' Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed decimal (python float) format ''' # '12319.943281' if not dm or dm == '0': return 0. d, m = re.match(r'^(\d+)(\d\d\.\d+)$', dm).groups() return float(d) + float(m) / 60
Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed decimal (python float) format
Below is the the instruction that describes the task: ### Input: Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed decimal (python float) format ### Response: def dm_to_sd(dm): ''' Converts a geographic co-ordinate given in "degrees/minutes" dddmm.mmmm format (eg, "12319.943281" = 123 degrees, 19.943281 minutes) to a signed decimal (python float) format ''' # '12319.943281' if not dm or dm == '0': return 0. d, m = re.match(r'^(\d+)(\d\d\.\d+)$', dm).groups() return float(d) + float(m) / 60
def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
Retrieve an input variable from our existing pool of options.
Below is the the instruction that describes the task: ### Input: Retrieve an input variable from our existing pool of options. ### Response: def _get_variable(vid, variables): """Retrieve an input variable from our existing pool of options. """ if isinstance(vid, six.string_types): vid = get_base_id(vid) else: vid = _get_string_vid(vid) for v in variables: if vid == get_base_id(v["id"]): return copy.deepcopy(v) raise ValueError("Did not find variable %s in \n%s" % (vid, pprint.pformat(variables)))
def _default_format(self, occur): """Return the default serialization format.""" if self.text or self.children: return self.start_tag() + "%s" + self.end_tag() return self.start_tag(empty=True)
Return the default serialization format.
Below is the the instruction that describes the task: ### Input: Return the default serialization format. ### Response: def _default_format(self, occur): """Return the default serialization format.""" if self.text or self.children: return self.start_tag() + "%s" + self.end_tag() return self.start_tag(empty=True)
def _get_fit_params(self, x, fit_key): """ Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2. """ q, chiAz, chiBz = x[0], x[3], x[6] eta = q/(1.+q)**2 chi_wtAvg = (q*chiAz+chiBz)/(1.+q) chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.) chi_a = (chiAz - chiBz)/2. fit_params = x fit_params[0] = np.log(q) fit_params[3] = chiHat fit_params[6] = chi_a return fit_params
Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2.
Below is the the instruction that describes the task: ### Input: Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2. ### Response: def _get_fit_params(self, x, fit_key): """ Transforms the input parameter to fit parameters for the 7dq2 model. That is, maps from x = [q, chiAx, chiAy, chiAz, chiBx, chiBy, chiBz] fit_params = [np.log(q), chiAx, chiAy, chiHat, chiBx, chiBy, chi_a] chiHat is defined in Eq.(3) of 1508.07253, but with chiAz and chiBz instead of chiA and chiB. chi_a = (chiAz - chiBz)/2. """ q, chiAz, chiBz = x[0], x[3], x[6] eta = q/(1.+q)**2 chi_wtAvg = (q*chiAz+chiBz)/(1.+q) chiHat = (chi_wtAvg - 38.*eta/113.*(chiAz + chiBz))/(1. - 76.*eta/113.) chi_a = (chiAz - chiBz)/2. fit_params = x fit_params[0] = np.log(q) fit_params[3] = chiHat fit_params[6] = chi_a return fit_params
def renderHTTP(self, context): """ Render the wrapped resource if HTTPS is already being used, otherwise invoke a helper which may generate a redirect. """ request = IRequest(context) if request.isSecure(): renderer = self.wrappedResource else: renderer = _SecureWrapper(self.urlGenerator, self.wrappedResource) return renderer.renderHTTP(context)
Render the wrapped resource if HTTPS is already being used, otherwise invoke a helper which may generate a redirect.
Below is the the instruction that describes the task: ### Input: Render the wrapped resource if HTTPS is already being used, otherwise invoke a helper which may generate a redirect. ### Response: def renderHTTP(self, context): """ Render the wrapped resource if HTTPS is already being used, otherwise invoke a helper which may generate a redirect. """ request = IRequest(context) if request.isSecure(): renderer = self.wrappedResource else: renderer = _SecureWrapper(self.urlGenerator, self.wrappedResource) return renderer.renderHTTP(context)
def update(self, E=None, **F): """ D.update([E, ]**F) -> None. Update D from dict/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k] """ if hasattr(E, 'keys'): self.extend(E) else: for key, value in E: self._set_key(key, value) self.extend(F)
D.update([E, ]**F) -> None. Update D from dict/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k]
Below is the the instruction that describes the task: ### Input: D.update([E, ]**F) -> None. Update D from dict/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k] ### Response: def update(self, E=None, **F): """ D.update([E, ]**F) -> None. Update D from dict/iterable E and F. If E present and has a .keys() method, does: for k in E: D[k] = E[k] If E present and lacks .keys() method, does: for (k, v) in E: D[k] = v In either case, this is followed by: for k in F: D[k] = F[k] """ if hasattr(E, 'keys'): self.extend(E) else: for key, value in E: self._set_key(key, value) self.extend(F)
def segmentlistdict_fromsearchsummary_out(xmldoc, program = None): """ Convenience wrapper for a common case usage of the segmentlistdict class: searches the process table in xmldoc for occurances of a program named program, then scans the search summary table for matching process IDs and constructs a segmentlistdict object from the out segments in those rows. Note: the segmentlists in the segmentlistdict are not necessarily coalesced, they contain the segments as they appear in the search_summary table. """ stbl = lsctables.SearchSummaryTable.get_table(xmldoc) ptbl = lsctables.ProcessTable.get_table(xmldoc) return stbl.get_out_segmentlistdict(program and ptbl.get_ids_by_program(program))
Convenience wrapper for a common case usage of the segmentlistdict class: searches the process table in xmldoc for occurances of a program named program, then scans the search summary table for matching process IDs and constructs a segmentlistdict object from the out segments in those rows. Note: the segmentlists in the segmentlistdict are not necessarily coalesced, they contain the segments as they appear in the search_summary table.
Below is the the instruction that describes the task: ### Input: Convenience wrapper for a common case usage of the segmentlistdict class: searches the process table in xmldoc for occurances of a program named program, then scans the search summary table for matching process IDs and constructs a segmentlistdict object from the out segments in those rows. Note: the segmentlists in the segmentlistdict are not necessarily coalesced, they contain the segments as they appear in the search_summary table. ### Response: def segmentlistdict_fromsearchsummary_out(xmldoc, program = None): """ Convenience wrapper for a common case usage of the segmentlistdict class: searches the process table in xmldoc for occurances of a program named program, then scans the search summary table for matching process IDs and constructs a segmentlistdict object from the out segments in those rows. Note: the segmentlists in the segmentlistdict are not necessarily coalesced, they contain the segments as they appear in the search_summary table. """ stbl = lsctables.SearchSummaryTable.get_table(xmldoc) ptbl = lsctables.ProcessTable.get_table(xmldoc) return stbl.get_out_segmentlistdict(program and ptbl.get_ids_by_program(program))
def _compute_ll(self): """ m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix """ self.fracs = [] self.logP = [] self.ll = [] for i in range(self.width): Dll = {'A': 0, 'C': 0, 'T': 0, 'G': 0} Df = {'A': 0, 'C': 0, 'T': 0, 'G': 0} DlogP= {'A': 0, 'C': 0, 'T': 0, 'G': 0} for key in self.counts[i].keys(): #print i,key,self.counts[i][key],self.nseqs Pij = self.counts[i][key] / float(self.nseqs) Df [key] = Pij Dll[key] = (math.log( (self.counts[i][key] + self.bgscale*self.background[key] ) / ((self.nseqs + self.bgscale) * self.background[key]) ) / math.log(2)) if Pij > 0: DlogP[key] = math.log(Pij)/math.log(2) else: DlogP[key] = -100 #Near zero self.fracs.append(Df) self.logP.append (DlogP) self.ll.append (Dll) self.P = self.fracs self._compute_bits() self._compute_ambig_ll() self._maxscore()
m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix
Below is the the instruction that describes the task: ### Input: m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix ### Response: def _compute_ll(self): """ m._compute_ll() -- [utility] Compute the log-likelihood matrix from the count matrix """ self.fracs = [] self.logP = [] self.ll = [] for i in range(self.width): Dll = {'A': 0, 'C': 0, 'T': 0, 'G': 0} Df = {'A': 0, 'C': 0, 'T': 0, 'G': 0} DlogP= {'A': 0, 'C': 0, 'T': 0, 'G': 0} for key in self.counts[i].keys(): #print i,key,self.counts[i][key],self.nseqs Pij = self.counts[i][key] / float(self.nseqs) Df [key] = Pij Dll[key] = (math.log( (self.counts[i][key] + self.bgscale*self.background[key] ) / ((self.nseqs + self.bgscale) * self.background[key]) ) / math.log(2)) if Pij > 0: DlogP[key] = math.log(Pij)/math.log(2) else: DlogP[key] = -100 #Near zero self.fracs.append(Df) self.logP.append (DlogP) self.ll.append (Dll) self.P = self.fracs self._compute_bits() self._compute_ambig_ll() self._maxscore()
def get_followers(self, auth_secret): """Get the follower list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the follower list is successfully obtained, False otherwise. result A dict containing the follower list with the key FOLLOWER_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN """ result = {pytwis_constants.ERROR_KEY: None} # Check if the user is logged in. loggedin, userid = self._is_loggedin(auth_secret) if not loggedin: result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN return (False, result) # Get the list of followers' userids. follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid) follower_userids = self._rc.zrange(follower_zset_key, 0, -1) if follower_userids is None or not follower_userids: result[pytwis_constants.FOLLOWER_LIST_KEY] = [] return (True, result) # Get the list of followers' usernames from their userids. with self._rc.pipeline() as pipe: pipe.multi() for follower_userid in follower_userids: follower_userid_profile_key = \ pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid) pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY) result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute() return (True, result)
Get the follower list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the follower list is successfully obtained, False otherwise. result A dict containing the follower list with the key FOLLOWER_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN
Below is the the instruction that describes the task: ### Input: Get the follower list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the follower list is successfully obtained, False otherwise. result A dict containing the follower list with the key FOLLOWER_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN ### Response: def get_followers(self, auth_secret): """Get the follower list of a logged-in user. Parameters ---------- auth_secret: str The authentication secret of the logged-in user. Returns ------- bool True if the follower list is successfully obtained, False otherwise. result A dict containing the follower list with the key FOLLOWER_LIST_KEY if the follower list is successfully obtained, a dict containing the error string with the key ERROR_KEY otherwise. Note ---- Possible error strings are listed as below: - ERROR_NOT_LOGGED_IN """ result = {pytwis_constants.ERROR_KEY: None} # Check if the user is logged in. loggedin, userid = self._is_loggedin(auth_secret) if not loggedin: result[pytwis_constants.ERROR_KEY] = pytwis_constants.ERROR_NOT_LOGGED_IN return (False, result) # Get the list of followers' userids. follower_zset_key = pytwis_constants.FOLLOWER_KEY_FORMAT.format(userid) follower_userids = self._rc.zrange(follower_zset_key, 0, -1) if follower_userids is None or not follower_userids: result[pytwis_constants.FOLLOWER_LIST_KEY] = [] return (True, result) # Get the list of followers' usernames from their userids. 
with self._rc.pipeline() as pipe: pipe.multi() for follower_userid in follower_userids: follower_userid_profile_key = \ pytwis_constants.USER_PROFILE_KEY_FORMAT.format(follower_userid) pipe.hget(follower_userid_profile_key, pytwis_constants.USERNAME_KEY) result[pytwis_constants.FOLLOWER_LIST_KEY] = pipe.execute() return (True, result)
def adjust_cells(self): """Adjust column size based on contents.""" self.resizeColumnsToContents() fm = self.horizontalHeader().fontMetrics() names = [fm.width(s.name + ' '*9) for s in self.source_model.shortcuts] self.setColumnWidth(NAME, max(names)) self.horizontalHeader().setStretchLastSection(True)
Adjust column size based on contents.
Below is the the instruction that describes the task: ### Input: Adjust column size based on contents. ### Response: def adjust_cells(self): """Adjust column size based on contents.""" self.resizeColumnsToContents() fm = self.horizontalHeader().fontMetrics() names = [fm.width(s.name + ' '*9) for s in self.source_model.shortcuts] self.setColumnWidth(NAME, max(names)) self.horizontalHeader().setStretchLastSection(True)
def _check_db_exists(self, instance): """ Check if the database we're targeting actually exists If not then we won't do any checks This allows the same config to be installed on many servers but fail gracefully """ dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY) context = "{} - {}".format(host, database) if self.existing_databases is None: cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE) try: self.existing_databases = {} cursor.execute(DATABASE_EXISTS_QUERY) for row in cursor: self.existing_databases[row.name] = True except Exception as e: self.log.error("Failed to check if database {} exists: {}".format(database, e)) return False, context finally: self.close_cursor(cursor) return database in self.existing_databases, context
Check if the database we're targeting actually exists If not then we won't do any checks This allows the same config to be installed on many servers but fail gracefully
Below is the the instruction that describes the task: ### Input: Check if the database we're targeting actually exists If not then we won't do any checks This allows the same config to be installed on many servers but fail gracefully ### Response: def _check_db_exists(self, instance): """ Check if the database we're targeting actually exists If not then we won't do any checks This allows the same config to be installed on many servers but fail gracefully """ dsn, host, username, password, database, driver = self._get_access_info(instance, self.DEFAULT_DB_KEY) context = "{} - {}".format(host, database) if self.existing_databases is None: cursor = self.get_cursor(instance, None, self.DEFAULT_DATABASE) try: self.existing_databases = {} cursor.execute(DATABASE_EXISTS_QUERY) for row in cursor: self.existing_databases[row.name] = True except Exception as e: self.log.error("Failed to check if database {} exists: {}".format(database, e)) return False, context finally: self.close_cursor(cursor) return database in self.existing_databases, context
def com_google_fonts_check_maxadvancewidth(ttFont): """MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?""" hhea_advance_width_max = ttFont['hhea'].advanceWidthMax hmtx_advance_width_max = None for g in ttFont['hmtx'].metrics.values(): if hmtx_advance_width_max is None: hmtx_advance_width_max = max(0, g[0]) else: hmtx_advance_width_max = max(g[0], hmtx_advance_width_max) if hmtx_advance_width_max != hhea_advance_width_max: yield FAIL, ("AdvanceWidthMax mismatch: expected {} (from hmtx);" " got {} (from hhea)").format(hmtx_advance_width_max, hhea_advance_width_max) else: yield PASS, ("MaxAdvanceWidth is consistent" " with values in the Hmtx and Hhea tables.")
MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?
Below is the the instruction that describes the task: ### Input: MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables? ### Response: def com_google_fonts_check_maxadvancewidth(ttFont): """MaxAdvanceWidth is consistent with values in the Hmtx and Hhea tables?""" hhea_advance_width_max = ttFont['hhea'].advanceWidthMax hmtx_advance_width_max = None for g in ttFont['hmtx'].metrics.values(): if hmtx_advance_width_max is None: hmtx_advance_width_max = max(0, g[0]) else: hmtx_advance_width_max = max(g[0], hmtx_advance_width_max) if hmtx_advance_width_max != hhea_advance_width_max: yield FAIL, ("AdvanceWidthMax mismatch: expected {} (from hmtx);" " got {} (from hhea)").format(hmtx_advance_width_max, hhea_advance_width_max) else: yield PASS, ("MaxAdvanceWidth is consistent" " with values in the Hmtx and Hhea tables.")
async def setViewLayers(self, layers, iden=None): ''' Args: layers ([str]): A top-down list of of layer guids iden (str): The view iden ( defaults to default view ). ''' if iden is None: iden = self.iden view = self.views.get(iden) if view is None: raise s_exc.NoSuchView(iden=iden) view.setLayers(layers)
Args: layers ([str]): A top-down list of of layer guids iden (str): The view iden ( defaults to default view ).
Below is the the instruction that describes the task: ### Input: Args: layers ([str]): A top-down list of of layer guids iden (str): The view iden ( defaults to default view ). ### Response: async def setViewLayers(self, layers, iden=None): ''' Args: layers ([str]): A top-down list of of layer guids iden (str): The view iden ( defaults to default view ). ''' if iden is None: iden = self.iden view = self.views.get(iden) if view is None: raise s_exc.NoSuchView(iden=iden) view.setLayers(layers)
def to_json(self): """ Returns the JSON Representation of the UI extension. """ result = super(UIExtension, self).to_json() result.update({ 'extension': self.extension }) return result
Returns the JSON Representation of the UI extension.
Below is the the instruction that describes the task: ### Input: Returns the JSON Representation of the UI extension. ### Response: def to_json(self): """ Returns the JSON Representation of the UI extension. """ result = super(UIExtension, self).to_json() result.update({ 'extension': self.extension }) return result
def settle( self, transferred_amount: TokenAmount, locked_amount: TokenAmount, locksroot: Locksroot, partner_transferred_amount: TokenAmount, partner_locked_amount: TokenAmount, partner_locksroot: Locksroot, block_identifier: BlockSpecification, ): """ Settles the channel. """ self.token_network.settle( channel_identifier=self.channel_identifier, transferred_amount=transferred_amount, locked_amount=locked_amount, locksroot=locksroot, partner=self.participant2, partner_transferred_amount=partner_transferred_amount, partner_locked_amount=partner_locked_amount, partner_locksroot=partner_locksroot, given_block_identifier=block_identifier, )
Settles the channel.
Below is the the instruction that describes the task: ### Input: Settles the channel. ### Response: def settle( self, transferred_amount: TokenAmount, locked_amount: TokenAmount, locksroot: Locksroot, partner_transferred_amount: TokenAmount, partner_locked_amount: TokenAmount, partner_locksroot: Locksroot, block_identifier: BlockSpecification, ): """ Settles the channel. """ self.token_network.settle( channel_identifier=self.channel_identifier, transferred_amount=transferred_amount, locked_amount=locked_amount, locksroot=locksroot, partner=self.participant2, partner_transferred_amount=partner_transferred_amount, partner_locked_amount=partner_locked_amount, partner_locksroot=partner_locksroot, given_block_identifier=block_identifier, )
def get_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False): """ Retrieves the changelog, from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :rises RuntimeError: If the changelog could not be retrieved """ changelog = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') changelog_file = os.path.join(project_dir, 'CHANGELOG') if os.path.exists(pkg_info_file) and os.path.exists(changelog_file): with open(changelog_file) as changelog_fd: changelog = changelog_fd.read() else: changelog = api.get_changelog( repo_path=project_dir, bugtracker_url=bugtracker_url, rpm_format=rpm_format, ) return changelog
Retrieves the changelog, from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :rises RuntimeError: If the changelog could not be retrieved
Below is the the instruction that describes the task: ### Input: Retrieves the changelog, from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :rises RuntimeError: If the changelog could not be retrieved ### Response: def get_changelog(project_dir=os.curdir, bugtracker_url='', rpm_format=False): """ Retrieves the changelog, from the CHANGELOG file (if in a package) or generates it from the git history. Optionally in rpm-compatible format. :param project_dir: Path to the git repo of the project. :type project_dir: str :param bugtracker_url: Url to the bug tracker for the issues. :type bugtracker_url: str :param rpm_format: if set to True, will make the changelog rpm-compatible :returns: changelog :rtype: str :rises RuntimeError: If the changelog could not be retrieved """ changelog = '' pkg_info_file = os.path.join(project_dir, 'PKG-INFO') changelog_file = os.path.join(project_dir, 'CHANGELOG') if os.path.exists(pkg_info_file) and os.path.exists(changelog_file): with open(changelog_file) as changelog_fd: changelog = changelog_fd.read() else: changelog = api.get_changelog( repo_path=project_dir, bugtracker_url=bugtracker_url, rpm_format=rpm_format, ) return changelog
def get_queue_settings(self, project_key): """ Get queue settings on project :param project_key: str :return: """ url = 'rest/servicedeskapi/queues/{}'.format(project_key) return self.get(url, headers=self.experimental_headers)
Get queue settings on project :param project_key: str :return:
Below is the the instruction that describes the task: ### Input: Get queue settings on project :param project_key: str :return: ### Response: def get_queue_settings(self, project_key): """ Get queue settings on project :param project_key: str :return: """ url = 'rest/servicedeskapi/queues/{}'.format(project_key) return self.get(url, headers=self.experimental_headers)
def prepare_duplicate_order_object(manager, origin_volume, iops, tier, duplicate_size, duplicate_snapshot_size, volume_type, hourly_billing_flag=False): """Prepare the duplicate order to submit to SoftLayer_Product::placeOrder() :param manager: The File or Block manager calling this function :param origin_volume: The origin volume which is being duplicated :param iops: The IOPS for the duplicate volume (performance) :param tier: The tier level for the duplicate volume (endurance) :param duplicate_size: The requested size for the duplicate volume :param duplicate_snapshot_size: The size for the duplicate snapshot space :param volume_type: The type of the origin volume ('file' or 'block') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True) :return: Returns the order object to be passed to the placeOrder() method of the Product_Order service """ # Verify that the origin volume has not been cancelled if 'billingItem' not in origin_volume: raise exceptions.SoftLayerError( "The origin volume has been cancelled; " "unable to order duplicate volume") # Verify that the origin volume has snapshot space (needed for duplication) if isinstance(utils.lookup(origin_volume, 'snapshotCapacityGb'), str): origin_snapshot_size = int(origin_volume['snapshotCapacityGb']) else: raise exceptions.SoftLayerError( "Snapshot space not found for the origin volume. 
" "Origin snapshot space is needed for duplication.") # Obtain the datacenter location ID for the duplicate if isinstance(utils.lookup(origin_volume, 'billingItem', 'location', 'id'), int): location_id = origin_volume['billingItem']['location']['id'] else: raise exceptions.SoftLayerError( "Cannot find origin volume's location") # Ensure the origin volume is STaaS v2 or higher # and supports Encryption at Rest if not _staas_version_is_v2_or_above(origin_volume): raise exceptions.SoftLayerError( "This volume cannot be duplicated since it " "does not support Encryption at Rest.") # If no specific snapshot space was requested for the duplicate, # use the origin snapshot space size if duplicate_snapshot_size is None: duplicate_snapshot_size = origin_snapshot_size # Use the origin volume size if no size was specified for the duplicate if duplicate_size is None: duplicate_size = origin_volume['capacityGb'] # Get the appropriate package for the order # ('storage_as_a_service' is currently used for duplicate volumes) package = get_package(manager, 'storage_as_a_service') # Determine the IOPS or tier level for the duplicate volume, along with # the type and prices for the order origin_storage_type = origin_volume['storageType']['keyName'] if 'PERFORMANCE' in origin_storage_type: volume_is_performance = True if iops is None: iops = int(origin_volume.get('provisionedIops', 0)) if iops <= 0: raise exceptions.SoftLayerError("Cannot find origin volume's provisioned IOPS") # Set up the price array for the order prices = [ find_price_by_category(package, 'storage_as_a_service'), find_price_by_category(package, 'storage_' + volume_type), find_saas_perform_space_price(package, duplicate_size), find_saas_perform_iops_price(package, duplicate_size, iops), ] # Add the price code for snapshot space as well, unless 0 GB was given if duplicate_snapshot_size > 0: prices.append(find_saas_snapshot_space_price( package, duplicate_snapshot_size, iops=iops)) elif 'ENDURANCE' in 
origin_storage_type: volume_is_performance = False if tier is None: tier = find_endurance_tier_iops_per_gb(origin_volume) # Set up the price array for the order prices = [ find_price_by_category(package, 'storage_as_a_service'), find_price_by_category(package, 'storage_' + volume_type), find_saas_endurance_space_price(package, duplicate_size, tier), find_saas_endurance_tier_price(package, tier), ] # Add the price code for snapshot space as well, unless 0 GB was given if duplicate_snapshot_size > 0: prices.append(find_saas_snapshot_space_price( package, duplicate_snapshot_size, tier=tier)) else: raise exceptions.SoftLayerError( "Origin volume does not have a valid storage type " "(with an appropriate keyName to indicate the " "volume is a PERFORMANCE or an ENDURANCE volume)") duplicate_order = { 'complexType': 'SoftLayer_Container_Product_Order_' 'Network_Storage_AsAService', 'packageId': package['id'], 'prices': prices, 'volumeSize': duplicate_size, 'quantity': 1, 'location': location_id, 'duplicateOriginVolumeId': origin_volume['id'], 'useHourlyPricing': hourly_billing_flag } if volume_is_performance: duplicate_order['iops'] = iops return duplicate_order
Prepare the duplicate order to submit to SoftLayer_Product::placeOrder() :param manager: The File or Block manager calling this function :param origin_volume: The origin volume which is being duplicated :param iops: The IOPS for the duplicate volume (performance) :param tier: The tier level for the duplicate volume (endurance) :param duplicate_size: The requested size for the duplicate volume :param duplicate_snapshot_size: The size for the duplicate snapshot space :param volume_type: The type of the origin volume ('file' or 'block') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True) :return: Returns the order object to be passed to the placeOrder() method of the Product_Order service
Below is the the instruction that describes the task: ### Input: Prepare the duplicate order to submit to SoftLayer_Product::placeOrder() :param manager: The File or Block manager calling this function :param origin_volume: The origin volume which is being duplicated :param iops: The IOPS for the duplicate volume (performance) :param tier: The tier level for the duplicate volume (endurance) :param duplicate_size: The requested size for the duplicate volume :param duplicate_snapshot_size: The size for the duplicate snapshot space :param volume_type: The type of the origin volume ('file' or 'block') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True) :return: Returns the order object to be passed to the placeOrder() method of the Product_Order service ### Response: def prepare_duplicate_order_object(manager, origin_volume, iops, tier, duplicate_size, duplicate_snapshot_size, volume_type, hourly_billing_flag=False): """Prepare the duplicate order to submit to SoftLayer_Product::placeOrder() :param manager: The File or Block manager calling this function :param origin_volume: The origin volume which is being duplicated :param iops: The IOPS for the duplicate volume (performance) :param tier: The tier level for the duplicate volume (endurance) :param duplicate_size: The requested size for the duplicate volume :param duplicate_snapshot_size: The size for the duplicate snapshot space :param volume_type: The type of the origin volume ('file' or 'block') :param hourly_billing_flag: Billing type, monthly (False) or hourly (True) :return: Returns the order object to be passed to the placeOrder() method of the Product_Order service """ # Verify that the origin volume has not been cancelled if 'billingItem' not in origin_volume: raise exceptions.SoftLayerError( "The origin volume has been cancelled; " "unable to order duplicate volume") # Verify that the origin volume has snapshot space (needed for duplication) if isinstance(utils.lookup(origin_volume, 
'snapshotCapacityGb'), str): origin_snapshot_size = int(origin_volume['snapshotCapacityGb']) else: raise exceptions.SoftLayerError( "Snapshot space not found for the origin volume. " "Origin snapshot space is needed for duplication.") # Obtain the datacenter location ID for the duplicate if isinstance(utils.lookup(origin_volume, 'billingItem', 'location', 'id'), int): location_id = origin_volume['billingItem']['location']['id'] else: raise exceptions.SoftLayerError( "Cannot find origin volume's location") # Ensure the origin volume is STaaS v2 or higher # and supports Encryption at Rest if not _staas_version_is_v2_or_above(origin_volume): raise exceptions.SoftLayerError( "This volume cannot be duplicated since it " "does not support Encryption at Rest.") # If no specific snapshot space was requested for the duplicate, # use the origin snapshot space size if duplicate_snapshot_size is None: duplicate_snapshot_size = origin_snapshot_size # Use the origin volume size if no size was specified for the duplicate if duplicate_size is None: duplicate_size = origin_volume['capacityGb'] # Get the appropriate package for the order # ('storage_as_a_service' is currently used for duplicate volumes) package = get_package(manager, 'storage_as_a_service') # Determine the IOPS or tier level for the duplicate volume, along with # the type and prices for the order origin_storage_type = origin_volume['storageType']['keyName'] if 'PERFORMANCE' in origin_storage_type: volume_is_performance = True if iops is None: iops = int(origin_volume.get('provisionedIops', 0)) if iops <= 0: raise exceptions.SoftLayerError("Cannot find origin volume's provisioned IOPS") # Set up the price array for the order prices = [ find_price_by_category(package, 'storage_as_a_service'), find_price_by_category(package, 'storage_' + volume_type), find_saas_perform_space_price(package, duplicate_size), find_saas_perform_iops_price(package, duplicate_size, iops), ] # Add the price code for snapshot space as well, 
unless 0 GB was given if duplicate_snapshot_size > 0: prices.append(find_saas_snapshot_space_price( package, duplicate_snapshot_size, iops=iops)) elif 'ENDURANCE' in origin_storage_type: volume_is_performance = False if tier is None: tier = find_endurance_tier_iops_per_gb(origin_volume) # Set up the price array for the order prices = [ find_price_by_category(package, 'storage_as_a_service'), find_price_by_category(package, 'storage_' + volume_type), find_saas_endurance_space_price(package, duplicate_size, tier), find_saas_endurance_tier_price(package, tier), ] # Add the price code for snapshot space as well, unless 0 GB was given if duplicate_snapshot_size > 0: prices.append(find_saas_snapshot_space_price( package, duplicate_snapshot_size, tier=tier)) else: raise exceptions.SoftLayerError( "Origin volume does not have a valid storage type " "(with an appropriate keyName to indicate the " "volume is a PERFORMANCE or an ENDURANCE volume)") duplicate_order = { 'complexType': 'SoftLayer_Container_Product_Order_' 'Network_Storage_AsAService', 'packageId': package['id'], 'prices': prices, 'volumeSize': duplicate_size, 'quantity': 1, 'location': location_id, 'duplicateOriginVolumeId': origin_volume['id'], 'useHourlyPricing': hourly_billing_flag } if volume_is_performance: duplicate_order['iops'] = iops return duplicate_order