code
stringlengths
75
104k
docstring
stringlengths
1
46.9k
text
stringlengths
164
112k
def check_owners(self, request, **resources):
    """ Check parents of current resource.

    Recursive scanning of the fact that the child has FK to the parent
    and in resources we have right objects. For a request like
    /author/1/book/2/page/3 it checks that Page pk=3 has a ForeignKey to
    Book pk=2 and Book pk=2 has a ForeignKey to Author pk=1.

    :param request: the current HTTP request object
    :param resources: resolved resource objects keyed by resource name
    :return bool: True if success, else raises HttpError (403)
    """
    # Public resources and root resources have no ownership chain to verify.
    if self._meta.allow_public_access or not self._meta.parent:
        return True

    # Validate the chain upwards first (recursion terminates at the root).
    self.parent.check_owners(request, **resources)

    objects = resources.get(self._meta.name)
    if self._meta.model and self._meta.parent._meta.model and objects:
        pr = resources.get(self._meta.parent._meta.name)

        # Fail fast when the parent object is missing. The original code
        # evaluated ``pr.pk`` inside the generator before testing ``pr``,
        # so a missing parent raised AttributeError instead of the
        # intended 403 response.
        if not pr:
            raise HttpError(
                "Access forbidden.", status=status.HTTP_403_FORBIDDEN)

        # Every child object must point back at the parent via the
        # conventional "<parent>_id" FK column.
        check = all(
            pr.pk == getattr(
                o, "%s_id" % self._meta.parent._meta.name, None)
            for o in as_tuple(objects))

        if not check:
            # 403 Error if there is error in parent-children relationship
            raise HttpError(
                "Access forbidden.", status=status.HTTP_403_FORBIDDEN)

    return True
Check parents of current resource. Recursive scanning of the fact that the child has FK to the parent and in resources we have right objects. We check that in request like /author/1/book/2/page/3 Page object with pk=3 has ForeignKey field linked to Book object with pk=2 and Book with pk=2 has ForeignKey field linked to Author object with pk=1. :return bool: If success else raise Exception
Below is the the instruction that describes the task: ### Input: Check parents of current resource. Recursive scanning of the fact that the child has FK to the parent and in resources we have right objects. We check that in request like /author/1/book/2/page/3 Page object with pk=3 has ForeignKey field linked to Book object with pk=2 and Book with pk=2 has ForeignKey field linked to Author object with pk=1. :return bool: If success else raise Exception ### Response: def check_owners(self, request, **resources): """ Check parents of current resource. Recursive scanning of the fact that the child has FK to the parent and in resources we have right objects. We check that in request like /author/1/book/2/page/3 Page object with pk=3 has ForeignKey field linked to Book object with pk=2 and Book with pk=2 has ForeignKey field linked to Author object with pk=1. :return bool: If success else raise Exception """ if self._meta.allow_public_access or not self._meta.parent: return True self.parent.check_owners(request, **resources) objects = resources.get(self._meta.name) if self._meta.model and self._meta.parent._meta.model and objects: pr = resources.get(self._meta.parent._meta.name) check = all( pr.pk == getattr( o, "%s_id" % self._meta.parent._meta.name, None) for o in as_tuple(objects)) if not pr or not check: # 403 Error if there is error in parent-children relationship raise HttpError( "Access forbidden.", status=status.HTTP_403_FORBIDDEN) return True
def parse_exposure(self, node):
    """
    Parses <Exposure>

    @param node: Node containing the <Exposure> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the exposure name is not being defined
    in the context of a component type.
    """
    # ``is None`` is the correct identity test (original used ``== None``).
    if self.current_component_type is None:
        self.raise_error('Exposures must be defined in a component type')

    # Catch only the missing-key case; the original bare ``except``
    # clauses would also mask unrelated errors (e.g. KeyboardInterrupt).
    try:
        name = node.lattrib['name']
    except KeyError:
        self.raise_error('<Exposure> must specify a name')

    try:
        dimension = node.lattrib['dimension']
    except KeyError:
        self.raise_error("Exposure '{0}' must specify a dimension", name)

    description = node.lattrib.get('description', '')

    self.current_component_type.add_exposure(
        Exposure(name, dimension, description))
Parses <Exposure> @param node: Node containing the <Exposure> element @type node: xml.etree.Element @raise ParseError: Raised when the exposure name is not being defined in the context of a component type.
Below is the the instruction that describes the task: ### Input: Parses <Exposure> @param node: Node containing the <Exposure> element @type node: xml.etree.Element @raise ParseError: Raised when the exposure name is not being defined in the context of a component type. ### Response: def parse_exposure(self, node): """ Parses <Exposure> @param node: Node containing the <Exposure> element @type node: xml.etree.Element @raise ParseError: Raised when the exposure name is not being defined in the context of a component type. """ if self.current_component_type == None: self.raise_error('Exposures must be defined in a component type') try: name = node.lattrib['name'] except: self.raise_error('<Exposure> must specify a name') try: dimension = node.lattrib['dimension'] except: self.raise_error("Exposure '{0}' must specify a dimension", name) description = node.lattrib.get('description', '') self.current_component_type.add_exposure(Exposure(name, dimension, description))
def _default_styles_xml(cls):
    """
    Return a bytestream containing XML for a default styles part.
    """
    # The template lives one level above this module, in ``templates/``.
    template_dir = os.path.join(os.path.split(__file__)[0], '..', 'templates')
    styles_path = os.path.join(template_dir, 'default-styles.xml')
    with open(styles_path, 'rb') as stream:
        return stream.read()
Return a bytestream containing XML for a default styles part.
Below is the the instruction that describes the task: ### Input: Return a bytestream containing XML for a default styles part. ### Response: def _default_styles_xml(cls): """ Return a bytestream containing XML for a default styles part. """ path = os.path.join( os.path.split(__file__)[0], '..', 'templates', 'default-styles.xml' ) with open(path, 'rb') as f: xml_bytes = f.read() return xml_bytes
def get_restore_path(filename):
    """ get_restore_path: returns path to directory for restoration points

    Args:
        filename (str): Name of file to store
    Returns:
        string path to the pickle file for ``filename``
    """
    store_dir = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION)
    # Create the restore directory lazily on first use.
    if not os.path.exists(store_dir):
        os.makedirs(store_dir)
    return os.path.join(store_dir, filename + '.pickle')
get_restore_path: returns path to directory for restoration points Args: filename (str): Name of file to store Returns: string path to file
Below is the the instruction that describes the task: ### Input: get_restore_path: returns path to directory for restoration points Args: filename (str): Name of file to store Returns: string path to file ### Response: def get_restore_path(filename): """ get_restore_path: returns path to directory for restoration points Args: filename (str): Name of file to store Returns: string path to file """ path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION) if not os.path.exists(path): os.makedirs(path) return os.path.join(path, filename + '.pickle')
def strip_tail(sequence, values):
    """Strip `values` from the end of `sequence`."""
    # Reuse strip_head on the reversed sequence, then restore the order.
    trimmed = strip_head(reversed(sequence), values)
    return list(reversed(list(trimmed)))
Strip `values` from the end of `sequence`.
Below is the the instruction that describes the task: ### Input: Strip `values` from the end of `sequence`. ### Response: def strip_tail(sequence, values): """Strip `values` from the end of `sequence`.""" return list(reversed(list(strip_head(reversed(sequence), values))))
def _install_maya(use_threaded_wrapper):
    """Helper function to Autodesk Maya support"""
    from maya import utils, cmds

    def threaded_wrapper(func, *args, **kwargs):
        # Maya requires UI/scene calls to run on its main thread.
        return utils.executeInMainThreadWithResult(func, *args, **kwargs)

    sys.stdout.write("Setting up Pyblish QML in Maya\n")

    # NOTE(review): presumably works around a googleapiclient conflict
    # specific to Maya 2018 -- confirm against _remove_googleapiclient.
    if cmds.about(version=True) == "2018":
        _remove_googleapiclient()

    _common_setup("Maya", threaded_wrapper, use_threaded_wrapper)
Helper function to Autodesk Maya support
Below is the the instruction that describes the task: ### Input: Helper function to Autodesk Maya support ### Response: def _install_maya(use_threaded_wrapper): """Helper function to Autodesk Maya support""" from maya import utils, cmds def threaded_wrapper(func, *args, **kwargs): return utils.executeInMainThreadWithResult( func, *args, **kwargs) sys.stdout.write("Setting up Pyblish QML in Maya\n") if cmds.about(version=True) == "2018": _remove_googleapiclient() _common_setup("Maya", threaded_wrapper, use_threaded_wrapper)
def _from_dict(cls, _dict): """Initialize a LeadingSentence object from a json dictionary.""" args = {} if 'text' in _dict: args['text'] = _dict.get('text') if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'element_locations' in _dict: args['element_locations'] = [ ElementLocations._from_dict(x) for x in (_dict.get('element_locations')) ] return cls(**args)
Initialize a LeadingSentence object from a json dictionary.
Below is the the instruction that describes the task: ### Input: Initialize a LeadingSentence object from a json dictionary. ### Response: def _from_dict(cls, _dict): """Initialize a LeadingSentence object from a json dictionary.""" args = {} if 'text' in _dict: args['text'] = _dict.get('text') if 'location' in _dict: args['location'] = Location._from_dict(_dict.get('location')) if 'element_locations' in _dict: args['element_locations'] = [ ElementLocations._from_dict(x) for x in (_dict.get('element_locations')) ] return cls(**args)
def extract_archive(archive, solver, put_inside=False):
    """
    Unzips/untars a previously downloaded archive file.

    :param archive: path to a ``.tar.gz`` or ``.zip`` archive
    :param solver: target directory name under ``solvers/``
    :param put_inside: if True, extract directly into ``solvers/<solver>``
        instead of extracting and then renaming the archive's top directory
    :raises ValueError: if the archive extension is not recognised
    """
    print('extracting {0}'.format(archive))
    root = os.path.join('solvers', solver if put_inside else '')
    directory = None

    if archive.endswith('.tar.gz'):
        if os.path.exists(archive[:-7]):
            shutil.rmtree(archive[:-7])
        # Context manager closes the tar file (the original leaked it).
        with tarfile.open(archive, 'r:gz') as tfile:
            tfile.extractall(root)
            # normally, directory should be the first name
            # but glucose4.1 has some garbage in the archive
            for name in tfile.getnames():
                if not name.startswith('./.'):
                    directory = name
                    break
    elif archive.endswith('.zip'):
        if os.path.exists(archive[:-4]):
            shutil.rmtree(archive[:-4])
        with zipfile.ZipFile(archive, 'r') as myzip:
            myzip.extractall(root)
            directory = myzip.namelist()[0]
        directory = directory.rstrip('/').split('/')[0]

    if not put_inside:
        # The original raised NameError here for unsupported extensions;
        # fail with an explicit, documented error instead.
        if directory is None:
            raise ValueError(
                'unsupported archive format: {0}'.format(archive))
        if os.path.exists(os.path.join('solvers', solver)):
            shutil.rmtree(os.path.join('solvers', solver))
        shutil.move(os.path.join('solvers', directory),
                    os.path.join('solvers', solver))
Unzips/untars a previously downloaded archive file.
Below is the the instruction that describes the task: ### Input: Unzips/untars a previously downloaded archive file. ### Response: def extract_archive(archive, solver, put_inside = False): """ Unzips/untars a previously downloaded archive file. """ print('extracting {0}'.format(archive)) root = os.path.join('solvers', solver if put_inside else '') if archive.endswith('.tar.gz'): if os.path.exists(archive[:-7]): shutil.rmtree(archive[:-7]) tfile = tarfile.open(archive, 'r:gz') tfile.extractall(root) # normally, directory should be the first name # but glucose4.1 has some garbage in the archive for name in tfile.getnames(): if not name.startswith('./.'): directory = name break elif archive.endswith('.zip'): if os.path.exists(archive[:-4]): shutil.rmtree(archive[:-4]) myzip = zipfile.ZipFile(archive, 'r') myzip.extractall(root) directory = myzip.namelist()[0] directory = directory.rstrip('/').split('/')[0] myzip.close() if not put_inside: if os.path.exists(os.path.join('solvers', solver)): shutil.rmtree(os.path.join('solvers', solver)) shutil.move(os.path.join('solvers', directory), os.path.join('solvers', solver))
def bundle_apps(self, bundle_name, bundle_apps):
    """Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file.

    Args:
        bundle_name (str): The output name of the bundle zip file.
        bundle_apps (list): A list of Apps to include in the bundle.
    """
    bundle_file = os.path.join(
        self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name))

    with zipfile.ZipFile(bundle_file, 'w') as bundle_zip:
        for app_path in bundle_apps:
            app_name = os.path.basename(app_path)
            # record each app added in the package report
            self.package_data['bundle'].append(
                {'action': 'Adding App:', 'output': app_name})
            bundle_zip.write(app_path, app_name)

    # record the finished bundle in the package report
    self.package_data['bundle'].append(
        {'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)})
Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle.
Below is the the instruction that describes the task: ### Input: Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle. ### Response: def bundle_apps(self, bundle_name, bundle_apps): """Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle. """ bundle_file = os.path.join( self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name) ) z = zipfile.ZipFile(bundle_file, 'w') for app in bundle_apps: # update package data self.package_data['bundle'].append( {'action': 'Adding App:', 'output': os.path.basename(app)} ) z.write(app, os.path.basename(app)) # update package data self.package_data['bundle'].append( {'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)} ) z.close()
def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
                                       equal_var=False, plot_roc=True,
                                       precalc_dist=False, calc_roc=True):
    '''
    Calculate the similarity of samples from the same and different categories. The
    cat_index gives the index of the category, where 1 in the first category

    Returns a dict with pairwise similarity series ('sim_dict'), t-test and
    Mann-Whitney p-values ('pval_dict'), and ROC/AUC data ('roc_data').
    '''
    cols = df.columns.tolist()

    # ``precalc_dist`` doubles as a flag (bool -> compute) and as the
    # precomputed condensed distance array itself.
    if type(precalc_dist) == bool:
        # compute distnace between rows (transpose to get cols as rows)
        dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
    else:
        dist_arr = precalc_dist

    # generate sample names with categories
    # assumes df columns are tuples/lists with the category at cat_index
    sample_combos = list(combinations(range(df.shape[1]), 2))
    sample_names = [str(ind) + '_same'
                    if cols[x[0]][cat_index] == cols[x[1]][cat_index]
                    else str(ind) + '_different'
                    for ind, x in enumerate(sample_combos)]

    ser_dist = pd.Series(data=dist_arr, index=sample_names)

    # find same-cat sample comparisons
    same_cat = [x for x in sample_names if x.split('_')[1] == 'same']

    # find diff-cat sample comparisons
    diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']

    # make series of same and diff category sample comparisons
    ser_same = ser_dist[same_cat]
    ser_same.name = 'Same Category'
    ser_diff = ser_dist[diff_cat]
    ser_diff.name = 'Different Category'

    sim_dict = {}
    roc_data = {}
    sim_data = {}

    sim_dict['same'] = ser_same
    sim_dict['diff'] = ser_diff

    pval_dict = {}
    ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same,
                                               equal_var=equal_var)
    ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)

    if calc_roc:
        # calc AUC: same-category pairs are the positive class (label 1)
        true_index = list(np.ones(sim_dict['same'].shape[0]))
        false_index = list(np.zeros(sim_dict['diff'].shape[0]))
        y_true = true_index + false_index

        # NOTE(review): Series.get_values() was removed in modern pandas;
        # this code requires an old pandas (use .to_numpy() to modernize).
        true_val = list(sim_dict['same'].get_values())
        false_val = list(sim_dict['diff'].get_values())
        y_score = true_val + false_val

        fpr, tpr, thresholds = roc_curve(y_true, y_score)

        inst_auc = auc(fpr, tpr)

        if plot_roc:
            plt.figure()
            plt.plot(fpr, tpr)
            # diagonal reference line = random classifier
            plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
            plt.figure(figsize=(10, 10))
            print('AUC', inst_auc)

        roc_data['true'] = y_true
        roc_data['score'] = y_score
        roc_data['fpr'] = fpr
        roc_data['tpr'] = tpr
        roc_data['thresholds'] = thresholds
        roc_data['auc'] = inst_auc

    sim_data['sim_dict'] = sim_dict
    sim_data['pval_dict'] = pval_dict
    sim_data['roc_data'] = roc_data

    return sim_data
Calculate the similarity of samples from the same and different categories. The cat_index gives the index of the category, where 1 in the first category
Below is the the instruction that describes the task: ### Input: Calculate the similarity of samples from the same and different categories. The cat_index gives the index of the category, where 1 in the first category ### Response: def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine', equal_var=False, plot_roc=True, precalc_dist=False, calc_roc=True): ''' Calculate the similarity of samples from the same and different categories. The cat_index gives the index of the category, where 1 in the first category ''' cols = df.columns.tolist() if type(precalc_dist) == bool: # compute distnace between rows (transpose to get cols as rows) dist_arr = 1 - pdist(df.transpose(), metric=dist_type) else: dist_arr = precalc_dist # generate sample names with categories sample_combos = list(combinations(range(df.shape[1]),2)) sample_names = [str(ind) + '_same' if cols[x[0]][cat_index] == cols[x[1]][cat_index] else str(ind) + '_different' for ind, x in enumerate(sample_combos)] ser_dist = pd.Series(data=dist_arr, index=sample_names) # find same-cat sample comparisons same_cat = [x for x in sample_names if x.split('_')[1] == 'same'] # find diff-cat sample comparisons diff_cat = [x for x in sample_names if x.split('_')[1] == 'different'] # make series of same and diff category sample comparisons ser_same = ser_dist[same_cat] ser_same.name = 'Same Category' ser_diff = ser_dist[diff_cat] ser_diff.name = 'Different Category' sim_dict = {} roc_data = {} sim_data = {} sim_dict['same'] = ser_same sim_dict['diff'] = ser_diff pval_dict = {} ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same, equal_var=equal_var) ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same) if calc_roc: # calc AUC true_index = list(np.ones(sim_dict['same'].shape[0])) false_index = list(np.zeros(sim_dict['diff'].shape[0])) y_true = true_index + false_index true_val = list(sim_dict['same'].get_values()) false_val = list(sim_dict['diff'].get_values()) y_score = 
true_val + false_val fpr, tpr, thresholds = roc_curve(y_true, y_score) inst_auc = auc(fpr, tpr) if plot_roc: plt.figure() plt.plot(fpr, tpr) plt.plot([0, 1], [0, 1], color='navy', linestyle='--') plt.figure(figsize=(10,10)) print('AUC', inst_auc) roc_data['true'] = y_true roc_data['score'] = y_score roc_data['fpr'] = fpr roc_data['tpr'] = tpr roc_data['thresholds'] = thresholds roc_data['auc'] = inst_auc sim_data['sim_dict'] = sim_dict sim_data['pval_dict'] = pval_dict sim_data['roc_data'] = roc_data return sim_data
def reciprocal_lattice(self) -> "Lattice":
    """
    Return the reciprocal lattice. Note that this is the standard
    reciprocal lattice used for solid state physics with a factor of
    2 * pi. If you are looking for the crystallographic reciprocal
    lattice, use the reciprocal_lattice_crystallographic property.
    The property is lazily generated for efficiency.
    """
    # Reciprocal basis vectors are the rows of (A^-1)^T, scaled by 2*pi.
    inv_transpose = np.linalg.inv(self._matrix).T
    return Lattice(inv_transpose * 2 * np.pi)
Return the reciprocal lattice. Note that this is the standard reciprocal lattice used for solid state physics with a factor of 2 * pi. If you are looking for the crystallographic reciprocal lattice, use the reciprocal_lattice_crystallographic property. The property is lazily generated for efficiency.
Below is the the instruction that describes the task: ### Input: Return the reciprocal lattice. Note that this is the standard reciprocal lattice used for solid state physics with a factor of 2 * pi. If you are looking for the crystallographic reciprocal lattice, use the reciprocal_lattice_crystallographic property. The property is lazily generated for efficiency. ### Response: def reciprocal_lattice(self) -> "Lattice": """ Return the reciprocal lattice. Note that this is the standard reciprocal lattice used for solid state physics with a factor of 2 * pi. If you are looking for the crystallographic reciprocal lattice, use the reciprocal_lattice_crystallographic property. The property is lazily generated for efficiency. """ v = np.linalg.inv(self._matrix).T return Lattice(v * 2 * np.pi)
def calc_directional_aop(self, report, parameter, parameter_dir):
    """
    Will calcuate the directional AOP (only sub-surface rrs for now) if the
    direction is defined using @ e.g. rrs@32.0:45 where
    <zenith-theta>:<azimuth-phi>

    :param report: The planarrad report dictionary. should include the
        quadtables and the directional info
    :param parameter: parameter to calc. Currently only sub-surface
        reflectance rrs.
    :param parameter_dir: direction string "<zenith>:<azimuth>" in degrees
    :return: comma-separated rrs string prefixed with " ," for file output
    """
    lg.debug('calculating the directional ' + parameter)
    tmp_zenith = []
    param_zenith = parameter_dir.split(':')[0]
    param_azimuth = parameter_dir.split(':')[1]
    # --------------------------------------------------#
    # find the mean directions values
    # --------------------------------------------------#
    for i_iter in range(0, int(report['vn'][1])):
        tmp_zenith.append(
            report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0])
    #that was a pain!
    tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
    zenith = scipy.asarray(tmp_zenith, dtype=float)
    azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
    # --------------------------------------------------#
    # now grab the min and max index of the closest match
    # --------------------------------------------------#
    #min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
    from scipy import interpolate
    lw = scipy.zeros(int(report['band_count'][1]))
    for j_iter in range(0, int(report['band_count'][1])):
        if parameter == 'rrs':
            lg.info('Calculating directional rrs')
            tmp_lw = report['L_w_band_' + str(j_iter + 1)]
        elif parameter == 'Rrs':
            lg.info('Calculating directional Rrs')
            print(report.keys())
            tmp_lw = report['L_it_band_' + str(j_iter + 1)]
        # radiance grid: rows = zenith quads, cols = azimuth quads
        lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
        # for the fist and last line we have to replicate the top and bottom circle
        for i_iter in range(0, int(report['hn'][1])):
            lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
            lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
        for i_iter in range(1, int(report['vn'][1]) - 1):
            lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','),
                                               dtype=float)
        # to do, make an array of zeros and loop over each list an apply to eah line. bruteforce
        # NOTE(review): interp2d is deprecated/removed in modern SciPy;
        # migrate to RegularGridInterpolator when upgrading.
        f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
        lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
    # ----
    # Now we finally have L_w we calculate the rrs
    # ----
    if parameter == 'rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:]  # ignore the first val as that is depth of val
    elif parameter == 'Rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:]  # ignore the first val as that is depth of val
    # make rrs a string so it can be written to file.
    rrs = ",".join(map(str, tmp_rrs))
    return " ," + rrs
Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @ e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi> :param report: The planarrad report dictionary. should include the quadtables and the directional info :param parameter: parameter to calc. Currently only sub-surface reflectance rrs. :return:
Below is the the instruction that describes the task: ### Input: Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @ e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi> :param report: The planarrad report dictionary. should include the quadtables and the directional info :param parameter: parameter to calc. Currently only sub-surface reflectance rrs. :return: ### Response: def calc_directional_aop(self, report, parameter, parameter_dir): """ Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @ e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi> :param report: The planarrad report dictionary. should include the quadtables and the directional info :param parameter: parameter to calc. Currently only sub-surface reflectance rrs. :return: """ lg.debug('calculating the directional ' + parameter) tmp_zenith = [] param_zenith = parameter_dir.split(':')[0] param_azimuth = parameter_dir.split(':')[1] # --------------------------------------------------# # find the mean directions values # --------------------------------------------------# for i_iter in range(0, int(report['vn'][1])): tmp_zenith.append(report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0]) #that was a pain! 
tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1] zenith = scipy.asarray(tmp_zenith, dtype=float) azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',') # --------------------------------------------------# # now grab the min and max index of the closest match # --------------------------------------------------# #min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin() from scipy import interpolate lw = scipy.zeros(int(report['band_count'][1])) for j_iter in range(0, int(report['band_count'][1])): if parameter == 'rrs': lg.info('Calculating directional rrs') tmp_lw = report['L_w_band_' + str(j_iter + 1)] elif parameter == 'Rrs': lg.info('Calculating directional Rrs') print(report.keys()) tmp_lw = report['L_it_band_' + str(j_iter + 1)] lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1]))) # for the fist and last line we have to replicate the top and bottom circle for i_iter in range(0, int(report['hn'][1])): lw_scal[0, i_iter] = tmp_lw[0].split(',')[0] lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0] for i_iter in range(1, int(report['vn'][1]) - 1): lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','), dtype=float) # to do, make an array of zeros and loop over each list an apply to eah line. bruteforce f1 = interpolate.interp2d(zenith, azimuth, lw_scal) lw[j_iter] = f1(float(param_zenith), float(param_azimuth)) # ---- # Now we finally have L_w we calculate the rrs # ---- if parameter == 'rrs': tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:] # ignore the first val as that is depth of val elif parameter == 'Rrs': tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:] # ignore the first val as that is depth of val # make rrs a string so it can be written to file. rrs = ",".join(map(str, tmp_rrs)) return " ," + rrs
def build(self):
    """Create and start up the internal workers."""
    # A stage at the end of a fork has never been linked downstream and
    # therefore has no output tube yet; give it one so workers can emit.
    if not self._output_tubes:
        self._output_tubes.append(self._worker_class.getTubeClass()())

    self._worker_class.assemble(
        self._worker_args,
        self._input_tube,
        self._output_tubes,
        self._size,
        self._disable_result,
        self._do_stop_task,
    )

    # Recursively build every downstream stage.
    for downstream in self._next_stages:
        downstream.build()
Create and start up the internal workers.
Below is the the instruction that describes the task: ### Input: Create and start up the internal workers. ### Response: def build(self): """Create and start up the internal workers.""" # If there's no output tube, it means that this stage # is at the end of a fork (hasn't been linked to any stage downstream). # Therefore, create one output tube. if not self._output_tubes: self._output_tubes.append(self._worker_class.getTubeClass()()) self._worker_class.assemble( self._worker_args, self._input_tube, self._output_tubes, self._size, self._disable_result, self._do_stop_task, ) # Build all downstream stages. for stage in self._next_stages: stage.build()
def find(pattern):
    '''
    Find all instances where the pattern is in the running command

    .. code-block:: bash

        salt '*' onyx.cmd find '^snmp-server.*$'

    .. note::
        This uses the `re.MULTILINE` regex format for python, and runs the
        regex against the whole show_run output.
    '''
    compiled = re.compile(pattern, re.MULTILINE)
    running_config = show_run()
    return compiled.findall(running_config)
Find all instances where the pattern is in the running command .. code-block:: bash salt '*' onyx.cmd find '^snmp-server.*$' .. note:: This uses the `re.MULTILINE` regex format for python, and runs the regex against the whole show_run output.
Below is the the instruction that describes the task: ### Input: Find all instances where the pattern is in the running command .. code-block:: bash salt '*' onyx.cmd find '^snmp-server.*$' .. note:: This uses the `re.MULTILINE` regex format for python, and runs the regex against the whole show_run output. ### Response: def find(pattern): ''' Find all instances where the pattern is in the running command .. code-block:: bash salt '*' onyx.cmd find '^snmp-server.*$' .. note:: This uses the `re.MULTILINE` regex format for python, and runs the regex against the whole show_run output. ''' matcher = re.compile(pattern, re.MULTILINE) return matcher.findall(show_run())
def visit_attribute(self, node):
    """check if the getattr is an access to a class member
    if so, register it. Also check for access to protected
    class member from outside its class (but ignore __special__
    methods)
    """
    # Access through the mandatory method parameter (self/cls) is a
    # member access on the current class -- register and stop.
    if self._uses_mandatory_method_param(node):
        self._accessed.set_accessed(node)
        return

    if self.linter.is_message_enabled("protected-access"):
        self._check_protected_attribute_access(node)
check if the getattr is an access to a class member if so, register it. Also check for access to protected class member from outside its class (but ignore __special__ methods)
Below is the the instruction that describes the task: ### Input: check if the getattr is an access to a class member if so, register it. Also check for access to protected class member from outside its class (but ignore __special__ methods) ### Response: def visit_attribute(self, node): """check if the getattr is an access to a class member if so, register it. Also check for access to protected class member from outside its class (but ignore __special__ methods) """ # Check self if self._uses_mandatory_method_param(node): self._accessed.set_accessed(node) return if not self.linter.is_message_enabled("protected-access"): return self._check_protected_attribute_access(node)
def projective_transform_by_points( x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True, preserve_range=False ): """Projective transform by given coordinates, usually 4 coordinates. see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. map_args : dictionary or None Keyword arguments passed to inverse map. output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. order : int The order of interpolation. The order has to be in the range 0-5: - 0 Nearest-neighbor - 1 Bi-linear (default) - 2 Bi-quadratic - 3 Bi-cubic - 4 Bi-quartic - 5 Bi-quintic mode : str One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. cval : float Used in conjunction with mode `constant`, the value outside the image boundaries. clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. Returns ------- numpy.array A processed image. Examples -------- Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3) >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__ - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__ """ if map_args is None: map_args = {} # if type(src) is list: if isinstance(src, list): # convert to numpy src = np.array(src) # if type(dst) is list: if isinstance(dst, list): dst = np.array(dst) if np.max(x) > 1: # convert to [0, 1] x = x / 255 m = transform.ProjectiveTransform() m.estimate(dst, src) warped = transform.warp( x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range ) return warped
Projective transform by given coordinates, usually 4 coordinates. see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. map_args : dictionary or None Keyword arguments passed to inverse map. output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. order : int The order of interpolation. The order has to be in the range 0-5: - 0 Nearest-neighbor - 1 Bi-linear (default) - 2 Bi-quadratic - 3 Bi-cubic - 4 Bi-quartic - 5 Bi-quintic mode : str One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. cval : float Used in conjunction with mode `constant`, the value outside the image boundaries. clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. Returns ------- numpy.array A processed image. Examples -------- Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3) >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__ - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__
Below is the the instruction that describes the task: ### Input: Projective transform by given coordinates, usually 4 coordinates. see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. map_args : dictionary or None Keyword arguments passed to inverse map. output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. order : int The order of interpolation. The order has to be in the range 0-5: - 0 Nearest-neighbor - 1 Bi-linear (default) - 2 Bi-quadratic - 3 Bi-cubic - 4 Bi-quartic - 5 Bi-quintic mode : str One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. cval : float Used in conjunction with mode `constant`, the value outside the image boundaries. clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. Returns ------- numpy.array A processed image. Examples -------- Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3) >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__ - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__ ### Response: def projective_transform_by_points( x, src, dst, map_args=None, output_shape=None, order=1, mode='constant', cval=0.0, clip=True, preserve_range=False ): """Projective transform by given coordinates, usually 4 coordinates. see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. map_args : dictionary or None Keyword arguments passed to inverse map. output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. order : int The order of interpolation. The order has to be in the range 0-5: - 0 Nearest-neighbor - 1 Bi-linear (default) - 2 Bi-quadratic - 3 Bi-cubic - 4 Bi-quartic - 5 Bi-quintic mode : str One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. cval : float Used in conjunction with mode `constant`, the value outside the image boundaries. clip : boolean Whether to clip the output to the range of values of the input image. 
This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. Returns ------- numpy.array A processed image. Examples -------- Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3) >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__ - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__ """ if map_args is None: map_args = {} # if type(src) is list: if isinstance(src, list): # convert to numpy src = np.array(src) # if type(dst) is list: if isinstance(dst, list): dst = np.array(dst) if np.max(x) > 1: # convert to [0, 1] x = x / 255 m = transform.ProjectiveTransform() m.estimate(dst, src) warped = transform.warp( x, m, map_args=map_args, output_shape=output_shape, order=order, mode=mode, cval=cval, clip=clip, preserve_range=preserve_range ) return warped
def browse_dialog_file(): """ Open up a GUI browse dialog window and let to user select one or more files :return str _path: Target directory path :return list _files: List of selected files """ logger_directory.info("enter browse_dialog") # We make files a list, because the user can multi-select files. _files = [] _path = "" try: _go_to_package() _path_bytes = subprocess.check_output(['python', 'gui_file_browse.py']) _path = _fix_path_bytes(_path_bytes) _files = [i for i in _path] _path = os.path.dirname(_path[0]) logger_directory.info("chosen path: {}, chosen file: {}".format(_path, _files)) except IndexError: logger_directory.warn("directory: browse_dialog_file: IndexError: no file chosen") except Exception as e: logger_directory.error("directory: browse_dialog_file: UnknownError: {}".format(e)) logger_directory.info("exit browse_dialog_file") return _path, _files
Open up a GUI browse dialog window and let to user select one or more files :return str _path: Target directory path :return list _files: List of selected files
Below is the the instruction that describes the task: ### Input: Open up a GUI browse dialog window and let to user select one or more files :return str _path: Target directory path :return list _files: List of selected files ### Response: def browse_dialog_file(): """ Open up a GUI browse dialog window and let to user select one or more files :return str _path: Target directory path :return list _files: List of selected files """ logger_directory.info("enter browse_dialog") # We make files a list, because the user can multi-select files. _files = [] _path = "" try: _go_to_package() _path_bytes = subprocess.check_output(['python', 'gui_file_browse.py']) _path = _fix_path_bytes(_path_bytes) _files = [i for i in _path] _path = os.path.dirname(_path[0]) logger_directory.info("chosen path: {}, chosen file: {}".format(_path, _files)) except IndexError: logger_directory.warn("directory: browse_dialog_file: IndexError: no file chosen") except Exception as e: logger_directory.error("directory: browse_dialog_file: UnknownError: {}".format(e)) logger_directory.info("exit browse_dialog_file") return _path, _files
def deserialize(cls, dict_model): """Returns an unsaved class object based on the valid properties passed in.""" kwargs = {} for f in cls._meta.concrete_fields: if f.attname in dict_model: kwargs[f.attname] = dict_model[f.attname] return cls(**kwargs)
Returns an unsaved class object based on the valid properties passed in.
Below is the the instruction that describes the task: ### Input: Returns an unsaved class object based on the valid properties passed in. ### Response: def deserialize(cls, dict_model): """Returns an unsaved class object based on the valid properties passed in.""" kwargs = {} for f in cls._meta.concrete_fields: if f.attname in dict_model: kwargs[f.attname] = dict_model[f.attname] return cls(**kwargs)
def finalize(self): """Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model. """ self.pause_session_logging() self._disable_logging() self._msg_callback = None self._error_msg_callback = None self._warning_msg_callback = None self._info_msg_callback = None
Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model.
Below is the the instruction that describes the task: ### Input: Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model. ### Response: def finalize(self): """Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model. """ self.pause_session_logging() self._disable_logging() self._msg_callback = None self._error_msg_callback = None self._warning_msg_callback = None self._info_msg_callback = None
def delta_e_cie1976(lab_color_vector, lab_color_matrix): """ Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`. """ return numpy.sqrt( numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`.
Below is the the instruction that describes the task: ### Input: Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`. ### Response: def delta_e_cie1976(lab_color_vector, lab_color_matrix): """ Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`. """ return numpy.sqrt( numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
def highlight_text(args, parser): """Outputs the result of highlighting a text.""" tokenizer = utils.get_tokenizer(args) corpus = utils.get_corpus(args) output_dir = os.path.abspath(args.output) if os.path.exists(output_dir): parser.exit(status=3, message='Output directory already exists, ' 'aborting.\n') os.makedirs(output_dir, exist_ok=True) if args.ngrams: if args.label is None or len(args.label) != len(args.ngrams): parser.error('There must be as many labels as there are files ' 'of n-grams') report = tacl.NgramHighlightReport(corpus, tokenizer) ngrams = [] for ngram_file in args.ngrams: ngrams.append(utils.get_ngrams(ngram_file)) minus_ngrams = [] if args.minus_ngrams: minus_ngrams = utils.get_ngrams(args.minus_ngrams) report.generate(args.output, args.base_name, ngrams, args.label, minus_ngrams) else: report = tacl.ResultsHighlightReport(corpus, tokenizer) report.generate(args.output, args.base_name, args.results)
Outputs the result of highlighting a text.
Below is the the instruction that describes the task: ### Input: Outputs the result of highlighting a text. ### Response: def highlight_text(args, parser): """Outputs the result of highlighting a text.""" tokenizer = utils.get_tokenizer(args) corpus = utils.get_corpus(args) output_dir = os.path.abspath(args.output) if os.path.exists(output_dir): parser.exit(status=3, message='Output directory already exists, ' 'aborting.\n') os.makedirs(output_dir, exist_ok=True) if args.ngrams: if args.label is None or len(args.label) != len(args.ngrams): parser.error('There must be as many labels as there are files ' 'of n-grams') report = tacl.NgramHighlightReport(corpus, tokenizer) ngrams = [] for ngram_file in args.ngrams: ngrams.append(utils.get_ngrams(ngram_file)) minus_ngrams = [] if args.minus_ngrams: minus_ngrams = utils.get_ngrams(args.minus_ngrams) report.generate(args.output, args.base_name, ngrams, args.label, minus_ngrams) else: report = tacl.ResultsHighlightReport(corpus, tokenizer) report.generate(args.output, args.base_name, args.results)
def _track_class_related_field(cls, field): """ Track a field on a related model """ # field = field on current model # related_field = field on related model (field, related_field) = field.split('__', 1) field_obj = cls._meta.get_field(field) related_cls = field_obj.remote_field.model related_name = field_obj.remote_field.get_accessor_name() if not hasattr(related_cls, '_tracked_related_fields'): setattr(related_cls, '_tracked_related_fields', {}) if related_field not in related_cls._tracked_related_fields.keys(): related_cls._tracked_related_fields[related_field] = [] # There can be several field from different or same model # related to a single model. # Thus _tracked_related_fields will be of the form: # { # 'field name on related model': [ # ('field name on current model', 'field name to current model'), # ('field name on another model', 'field name to another model'), # ... # ], # ... # } related_cls._tracked_related_fields[related_field].append( (field, related_name) ) _add_signals_to_cls(related_cls) # Detect m2m fields changes if isinstance(related_cls._meta.get_field(related_field), ManyToManyField): m2m_changed.connect( tracking_m2m, sender=getattr(related_cls, related_field).through, dispatch_uid=repr(related_cls), )
Track a field on a related model
Below is the the instruction that describes the task: ### Input: Track a field on a related model ### Response: def _track_class_related_field(cls, field): """ Track a field on a related model """ # field = field on current model # related_field = field on related model (field, related_field) = field.split('__', 1) field_obj = cls._meta.get_field(field) related_cls = field_obj.remote_field.model related_name = field_obj.remote_field.get_accessor_name() if not hasattr(related_cls, '_tracked_related_fields'): setattr(related_cls, '_tracked_related_fields', {}) if related_field not in related_cls._tracked_related_fields.keys(): related_cls._tracked_related_fields[related_field] = [] # There can be several field from different or same model # related to a single model. # Thus _tracked_related_fields will be of the form: # { # 'field name on related model': [ # ('field name on current model', 'field name to current model'), # ('field name on another model', 'field name to another model'), # ... # ], # ... # } related_cls._tracked_related_fields[related_field].append( (field, related_name) ) _add_signals_to_cls(related_cls) # Detect m2m fields changes if isinstance(related_cls._meta.get_field(related_field), ManyToManyField): m2m_changed.connect( tracking_m2m, sender=getattr(related_cls, related_field).through, dispatch_uid=repr(related_cls), )
def relaxation_decomp(P, p0, obs, times=[1], k=None, ncv=None): r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times """ R, D, L = rdl_decomposition(P, k=k, ncv=ncv) """Extract eigenvalues""" ev = np.diagonal(D) """Amplitudes""" amplitudes = np.dot(p0, R) * np.dot(L, obs) """Propgate eigenvalues""" times = np.asarray(times) ev_t = ev[np.newaxis, :] ** times[:, np.newaxis] """Compute result""" res = np.dot(ev_t, amplitudes) """Truncate imgainary part - is zero anyways""" res = res.real return res
r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times
Below is the the instruction that describes the task: ### Input: r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times ### Response: def relaxation_decomp(P, p0, obs, times=[1], k=None, ncv=None): r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times """ R, D, L = rdl_decomposition(P, k=k, ncv=ncv) """Extract eigenvalues""" ev = np.diagonal(D) """Amplitudes""" amplitudes = np.dot(p0, R) * np.dot(L, obs) """Propgate eigenvalues""" times = np.asarray(times) ev_t = ev[np.newaxis, :] ** times[:, np.newaxis] """Compute result""" res = np.dot(ev_t, amplitudes) """Truncate imgainary part - is zero anyways""" res = res.real return res
def get_all_users(resource_root, view=None): """ Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects. """ return call(resource_root.get, USERS_PATH, ApiUser, True, params=view and dict(view=view) or None)
Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects.
Below is the the instruction that describes the task: ### Input: Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects. ### Response: def get_all_users(resource_root, view=None): """ Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects. """ return call(resource_root.get, USERS_PATH, ApiUser, True, params=view and dict(view=view) or None)
def remove_send_last_message(self, connection): """Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages. """ if connection in self._send_last_message: del self._send_last_message[connection] LOGGER.debug("Removed send_last_message function " "for connection %s", connection) else: LOGGER.warning("Attempted to remove send_last_message " "function for connection %s, but no " "send_last_message function was registered", connection)
Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages.
Below is the the instruction that describes the task: ### Input: Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages. ### Response: def remove_send_last_message(self, connection): """Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages. """ if connection in self._send_last_message: del self._send_last_message[connection] LOGGER.debug("Removed send_last_message function " "for connection %s", connection) else: LOGGER.warning("Attempted to remove send_last_message " "function for connection %s, but no " "send_last_message function was registered", connection)
def get_users(self, course): """ Returns a sorted list of users """ users = OrderedDict(sorted(list(self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)).items()), key=lambda k: k[1][0] if k[1] is not None else "")) return users
Returns a sorted list of users
Below is the the instruction that describes the task: ### Input: Returns a sorted list of users ### Response: def get_users(self, course): """ Returns a sorted list of users """ users = OrderedDict(sorted(list(self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)).items()), key=lambda k: k[1][0] if k[1] is not None else "")) return users
def extant_file(file): """ 'Type' for argparse - checks that file exists but does not open. """ if not os.path.exists(file): # Argparse uses the ArgumentTypeError to give a rejection message like: # error: argument input: file does not exist raise argparse.ArgumentTypeError("{0} does not exist".format(file)) return file
'Type' for argparse - checks that file exists but does not open.
Below is the the instruction that describes the task: ### Input: 'Type' for argparse - checks that file exists but does not open. ### Response: def extant_file(file): """ 'Type' for argparse - checks that file exists but does not open. """ if not os.path.exists(file): # Argparse uses the ArgumentTypeError to give a rejection message like: # error: argument input: file does not exist raise argparse.ArgumentTypeError("{0} does not exist".format(file)) return file
def fileinfo(self, fid): """Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated.""" if not isinstance(fid, str): raise TypeError("Your file ID must be a string") try: info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5) if not info: warnings.warn( f"Your query for file with ID: '{fid}' failed.", RuntimeWarning ) elif fid in self.__files and not self.__files[fid].updated: self.__files[fid].fileupdate(info) except queue.Empty as ex: raise ValueError( "lain didn't produce a callback!\n" "Are you sure your query wasn't malformed?" ) from ex return info
Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated.
Below is the the instruction that describes the task: ### Input: Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated. ### Response: def fileinfo(self, fid): """Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated.""" if not isinstance(fid, str): raise TypeError("Your file ID must be a string") try: info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5) if not info: warnings.warn( f"Your query for file with ID: '{fid}' failed.", RuntimeWarning ) elif fid in self.__files and not self.__files[fid].updated: self.__files[fid].fileupdate(info) except queue.Empty as ex: raise ValueError( "lain didn't produce a callback!\n" "Are you sure your query wasn't malformed?" ) from ex return info
def trim_shared_prefix(ref, alt): """ Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 # guaranteed that ref and alt agree on all the characters # up to i'th position, so it doesn't matter which one we pull # the prefix out of prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix.
Below is the the instruction that describes the task: ### Input: Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. ### Response: def trim_shared_prefix(ref, alt): """ Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 # guaranteed that ref and alt agree on all the characters # up to i'th position, so it doesn't matter which one we pull # the prefix out of prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
def remove_object(self, name): """ Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed. """ if name not in self._object_map: raise RuntimeError('No object with name {} is registered.'.format(name)) for fn_name in list(self._function_map.keys()): if fn_name.startswith(name + '.') or fn_name.startswith(name + ':'): self._remove_function(fn_name) del self._object_map[name]
Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed.
Below is the the instruction that describes the task: ### Input: Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed. ### Response: def remove_object(self, name): """ Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed. """ if name not in self._object_map: raise RuntimeError('No object with name {} is registered.'.format(name)) for fn_name in list(self._function_map.keys()): if fn_name.startswith(name + '.') or fn_name.startswith(name + ':'): self._remove_function(fn_name) del self._object_map[name]
def devop_read(self, args, bustype): '''read from device''' if len(args) < 5: print("Usage: devop read <spi|i2c> name bus address regstart count") return name = args[0] bus = int(args[1],base=0) address = int(args[2],base=0) reg = int(args[3],base=0) count = int(args[4],base=0) self.master.mav.device_op_read_send(self.target_system, self.target_component, self.request_id, bustype, bus, address, name, reg, count) self.request_id += 1
read from device
Below is the the instruction that describes the task: ### Input: read from device ### Response: def devop_read(self, args, bustype): '''read from device''' if len(args) < 5: print("Usage: devop read <spi|i2c> name bus address regstart count") return name = args[0] bus = int(args[1],base=0) address = int(args[2],base=0) reg = int(args[3],base=0) count = int(args[4],base=0) self.master.mav.device_op_read_send(self.target_system, self.target_component, self.request_id, bustype, bus, address, name, reg, count) self.request_id += 1
def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut, start_ut=None, end_ut=None): """ Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time) """ # for checking input: assert day_start_ut <= start_ut assert day_start_ut <= end_ut assert start_ut <= end_ut events = [] # check that trip takes place on that day: if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut): return events query = """SELECT stop_I, arr_time_ds+?, dep_time_ds+? FROM stop_times JOIN stops USING(stop_I) WHERE (trip_I = ?) """ params = [day_start_ut, day_start_ut, trip_I] if start_ut: query += "AND (dep_time_ds > ?-?)" params += [start_ut, day_start_ut] if end_ut: query += "AND (arr_time_ds < ?-?)" params += [end_ut, day_start_ut] query += "ORDER BY arr_time_ds" cur = self.conn.cursor() rows = cur.execute(query, params) stop_data = list(rows) for i in range(len(stop_data) - 1): event = { "from_stop": stop_data[i][0], "to_stop": stop_data[i + 1][0], "dep_time_ut": stop_data[i][2], "arr_time_ut": stop_data[i + 1][1] } events.append(event) return events
Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time)
Below is the the instruction that describes the task: ### Input: Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time) ### Response: def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut, start_ut=None, end_ut=None): """ Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time) """ # for checking input: assert day_start_ut <= start_ut assert day_start_ut <= end_ut assert start_ut <= end_ut events = [] # check that trip takes place on that day: if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut): return events query = """SELECT stop_I, arr_time_ds+?, dep_time_ds+? FROM stop_times JOIN stops USING(stop_I) WHERE (trip_I = ?) 
""" params = [day_start_ut, day_start_ut, trip_I] if start_ut: query += "AND (dep_time_ds > ?-?)" params += [start_ut, day_start_ut] if end_ut: query += "AND (arr_time_ds < ?-?)" params += [end_ut, day_start_ut] query += "ORDER BY arr_time_ds" cur = self.conn.cursor() rows = cur.execute(query, params) stop_data = list(rows) for i in range(len(stop_data) - 1): event = { "from_stop": stop_data[i][0], "to_stop": stop_data[i + 1][0], "dep_time_ut": stop_data[i][2], "arr_time_ut": stop_data[i + 1][1] } events.append(event) return events
def enable(self): """ Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send` """ yield from self._check_for_feature() iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=carbons_xso.Enable() ) yield from self.client.send(iq)
Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send`
Below is the the instruction that describes the task: ### Input: Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send` ### Response: def enable(self): """ Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send` """ yield from self._check_for_feature() iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=carbons_xso.Enable() ) yield from self.client.send(iq)
def wait_for_readability(self): """ Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed) """ with self.lock: while True: if self._socket is None or self._eof: return False if self._state in ("connected", "closing"): return True if self._state == "tls-handshake" and \ self._tls_state == "want_read": return True self._state_cond.wait()
Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed)
Below is the the instruction that describes the task: ### Input: Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed) ### Response: def wait_for_readability(self): """ Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed) """ with self.lock: while True: if self._socket is None or self._eof: return False if self._state in ("connected", "closing"): return True if self._state == "tls-handshake" and \ self._tls_state == "want_read": return True self._state_cond.wait()
def set_position(x, y, stream=STD_OUTPUT_HANDLE): ''' Sets current position of the cursor. ''' stream = kernel32.GetStdHandle(stream) value = x + (y << 16) kernel32.SetConsoleCursorPosition(stream, c_long(value))
Sets current position of the cursor.
Below is the the instruction that describes the task: ### Input: Sets current position of the cursor. ### Response: def set_position(x, y, stream=STD_OUTPUT_HANDLE): ''' Sets current position of the cursor. ''' stream = kernel32.GetStdHandle(stream) value = x + (y << 16) kernel32.SetConsoleCursorPosition(stream, c_long(value))
def screenshot(self): """Take screenshot Return: PIL.Image """ url = urljoin(self.__device_url, "screenshot") r = httpdo('GET', url) raw_image = base64.b64decode(r.value) return Image.open(StringIO(raw_image))
Take screenshot Return: PIL.Image
Below is the the instruction that describes the task: ### Input: Take screenshot Return: PIL.Image ### Response: def screenshot(self): """Take screenshot Return: PIL.Image """ url = urljoin(self.__device_url, "screenshot") r = httpdo('GET', url) raw_image = base64.b64decode(r.value) return Image.open(StringIO(raw_image))
def _set_valid_lifetime(self, v, load=False): """ Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """valid_lifetime must be of a type compatible with common-def:time-interval-sec""", 'defined-type': "common-def:time-interval-sec", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, 
namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""", }) self.__valid_lifetime = t if hasattr(self, '_set'): self._set()
Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly.
Below is the the instruction that describes the task: ### Input: Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly. ### Response: def _set_valid_lifetime(self, v, load=False): """ Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """valid_lifetime must be of a type compatible with common-def:time-interval-sec""", 'defined-type': "common-def:time-interval-sec", 'generated-type': 
"""YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""", }) self.__valid_lifetime = t if hasattr(self, '_set'): self._set()
async def get_power_parameters(self): """Get the power paramters for this node.""" data = await self._handler.power_parameters(system_id=self.system_id) return data
Get the power paramters for this node.
Below is the the instruction that describes the task: ### Input: Get the power paramters for this node. ### Response: async def get_power_parameters(self): """Get the power paramters for this node.""" data = await self._handler.power_parameters(system_id=self.system_id) return data
async def __handle_ping(self, _ : Ping): """ Handle a Ping message. Pong the backend """ self.__last_ping = time.time() await ZMQUtils.send(self.__backend_socket, Pong())
Handle a Ping message. Pong the backend
Below is the the instruction that describes the task: ### Input: Handle a Ping message. Pong the backend ### Response: async def __handle_ping(self, _ : Ping): """ Handle a Ping message. Pong the backend """ self.__last_ping = time.time() await ZMQUtils.send(self.__backend_socket, Pong())
def __query_options(self): """Get the query options string to use for this query.""" options = 0 if self.__tailable: options |= _QUERY_OPTIONS["tailable_cursor"] if self.__slave_okay or self.__pool._slave_okay: options |= _QUERY_OPTIONS["slave_okay"] if not self.__timeout: options |= _QUERY_OPTIONS["no_timeout"] return options
Get the query options string to use for this query.
Below is the the instruction that describes the task: ### Input: Get the query options string to use for this query. ### Response: def __query_options(self): """Get the query options string to use for this query.""" options = 0 if self.__tailable: options |= _QUERY_OPTIONS["tailable_cursor"] if self.__slave_okay or self.__pool._slave_okay: options |= _QUERY_OPTIONS["slave_okay"] if not self.__timeout: options |= _QUERY_OPTIONS["no_timeout"] return options
def query_raw(self, query, order_by=None, limit=None, offset=0): """ Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value. """ warnings.warn( "query_raw() has been merged with query(). use query(raw=...) instead.", PendingDeprecationWarning ) return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value.
Below is the the instruction that describes the task: ### Input: Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value. ### Response: def query_raw(self, query, order_by=None, limit=None, offset=0): """ Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value. 
""" warnings.warn( "query_raw() has been merged with query(). use query(raw=...) instead.", PendingDeprecationWarning ) return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
def view(self, shape=None, chunks=None, dtype=None, fill_value=None, filters=None, read_only=None, synchronizer=None): """Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... 
astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... 
print(e) operation not permitted for views """ store = self._store chunk_store = self._chunk_store path = self._path if read_only is None: read_only = self._read_only if synchronizer is None: synchronizer = self._synchronizer a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only, synchronizer=synchronizer, cache_metadata=True) a._is_view = True # allow override of some properties if dtype is None: dtype = self._dtype else: dtype = np.dtype(dtype) a._dtype = dtype if shape is None: shape = self._shape else: shape = normalize_shape(shape) a._shape = shape if chunks is not None: chunks = normalize_chunks(chunks, shape, dtype.itemsize) a._chunks = chunks if fill_value is not None: a._fill_value = fill_value if filters is not None: a._filters = filters return a
Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> 
a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... print(e) operation not permitted for views
Below is the the instruction that describes the task: ### Input: Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = 
np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... print(e) operation not permitted for views ### Response: def view(self, shape=None, chunks=None, dtype=None, fill_value=None, filters=None, read_only=None, synchronizer=None): """Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... 
astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... 
print(e) operation not permitted for views """ store = self._store chunk_store = self._chunk_store path = self._path if read_only is None: read_only = self._read_only if synchronizer is None: synchronizer = self._synchronizer a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only, synchronizer=synchronizer, cache_metadata=True) a._is_view = True # allow override of some properties if dtype is None: dtype = self._dtype else: dtype = np.dtype(dtype) a._dtype = dtype if shape is None: shape = self._shape else: shape = normalize_shape(shape) a._shape = shape if chunks is not None: chunks = normalize_chunks(chunks, shape, dtype.itemsize) a._chunks = chunks if fill_value is not None: a._fill_value = fill_value if filters is not None: a._filters = filters return a
def find_task(self, request, context):
    """Look up a single task by uuid and return it as a protobuf message.

    An empty ``TaskMessage`` is returned when the uuid is not known to
    the listener's memory.
    """
    _log_request(request, context)
    stored = self.listener.memory.tasks.get(request.task_uuid)
    if stored:
        return ClearlyServer._event_to_pb(stored)[1]
    return clearly_pb2.TaskMessage()
Finds one specific task.
Below is the the instruction that describes the task: ### Input: Finds one specific task. ### Response: def find_task(self, request, context): """Finds one specific task.""" _log_request(request, context) task = self.listener.memory.tasks.get(request.task_uuid) if not task: return clearly_pb2.TaskMessage() return ClearlyServer._event_to_pb(task)[1]
def reprovision_and_retry(func):
    """
    Wrap an API function so an unknown app ID triggers re-provisioning.

    The wrapped function's ``errback`` keyword (a callable receiving the
    exception; defaults to re-raising it) is replaced by a wrapper that,
    on ``UnknownAppID``, re-runs every provisioning call recorded in
    ``OPTIONS['INITIAL']`` and retries ``func`` once.  Any exception
    raised during the retry -- or any error other than ``UnknownAppID``
    -- is forwarded to the original ``errback``.

    :param func: API function accepting an optional ``errback`` keyword.
    :returns: the wrapped function.
    """
    @functools.wraps(func)
    def wrapper(*a, **kw):
        errback = kw.get('errback', None)
        if errback is None:
            def errback(e):
                raise e

        def errback_wrapper(e):
            if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS:
                try:
                    for initial in OPTIONS['INITIAL']:
                        provision(*initial)  # retry provisioning the initial setup
                    func(*a, **kw)  # and try the function once more
                except Exception as new_exc:  # was Py2-only ``except E, v`` syntax
                    errback(new_exc)  # throwing the new exception
            else:
                errback(e)  # not an instance of UnknownAppID - nothing we can do here

        kw['errback'] = errback_wrapper
        return func(*a, **kw)
    return wrapper
Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error.
Below is the the instruction that describes the task: ### Input: Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error. ### Response: def reprovision_and_retry(func): """ Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error. """ @functools.wraps(func) def wrapper(*a, **kw): errback = kw.get('errback', None) if errback is None: def errback(e): raise e def errback_wrapper(e): if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS: try: for initial in OPTIONS['INITIAL']: provision(*initial) # retry provisioning the initial setup func(*a, **kw) # and try the function once more except Exception, new_exc: errback(new_exc) # throwing the new exception else: errback(e) # not an instance of UnknownAppID - nothing we can do here kw['errback'] = errback_wrapper return func(*a, **kw) return wrapper
def radio_calibration_send(self, aileron, elevator, rudder, gyro, pitch, throttle, force_mavlink1=False):
    '''
    Send the complete set of radio calibration parameters.

    aileron  : Aileron setpoints: left, center, right (uint16_t)
    elevator : Elevator setpoints: nose down, center, nose up (uint16_t)
    rudder   : Rudder setpoints: nose left, center, nose right (uint16_t)
    gyro     : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t)
    pitch    : Pitch curve setpoints (every 25%) (uint16_t)
    throttle : Throttle curve setpoints (every 25%) (uint16_t)
    '''
    # Encode first, then hand the message to the transport layer.
    msg = self.radio_calibration_encode(aileron, elevator, rudder,
                                        gyro, pitch, throttle)
    return self.send(msg, force_mavlink1=force_mavlink1)
Complete set of calibration parameters for the radio aileron : Aileron setpoints: left, center, right (uint16_t) elevator : Elevator setpoints: nose down, center, nose up (uint16_t) rudder : Rudder setpoints: nose left, center, nose right (uint16_t) gyro : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t) pitch : Pitch curve setpoints (every 25%) (uint16_t) throttle : Throttle curve setpoints (every 25%) (uint16_t)
Below is the the instruction that describes the task: ### Input: Complete set of calibration parameters for the radio aileron : Aileron setpoints: left, center, right (uint16_t) elevator : Elevator setpoints: nose down, center, nose up (uint16_t) rudder : Rudder setpoints: nose left, center, nose right (uint16_t) gyro : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t) pitch : Pitch curve setpoints (every 25%) (uint16_t) throttle : Throttle curve setpoints (every 25%) (uint16_t) ### Response: def radio_calibration_send(self, aileron, elevator, rudder, gyro, pitch, throttle, force_mavlink1=False): ''' Complete set of calibration parameters for the radio aileron : Aileron setpoints: left, center, right (uint16_t) elevator : Elevator setpoints: nose down, center, nose up (uint16_t) rudder : Rudder setpoints: nose left, center, nose right (uint16_t) gyro : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t) pitch : Pitch curve setpoints (every 25%) (uint16_t) throttle : Throttle curve setpoints (every 25%) (uint16_t) ''' return self.send(self.radio_calibration_encode(aileron, elevator, rudder, gyro, pitch, throttle), force_mavlink1=force_mavlink1)
def states(self):
    """
    Return every state mentioned in the transition table.

    This is "Q" in the formal definition of a state machine: the
    frozenset of all source and destination states across transitions.
    """
    collected = set()
    for (inState, inputSymbol, outState, outputSymbol) in self._transitions:
        collected.add(inState)
        collected.add(outState)
    return frozenset(collected)
All valid states; "Q" in the mathematical description of a state machine.
Below is the the instruction that describes the task: ### Input: All valid states; "Q" in the mathematical description of a state machine. ### Response: def states(self): """ All valid states; "Q" in the mathematical description of a state machine. """ return frozenset( chain.from_iterable( (inState, outState) for (inState, inputSymbol, outState, outputSymbol) in self._transitions ) )
def pypi_register(server='pypitest'):
    """Register this package with a PyPI server.

    The test server (``pypitest``) is targeted explicitly via ``-r``;
    any other value falls back to the default index configured locally.

    .. note:: May need to tweak the ~/.pypirc file, per issue:
       http://stackoverflow.com/questions/1569315
    """
    command = 'python setup.py register'
    if server == 'pypitest':
        command = command + ' -r https://testpypi.python.org/pypi'
    _execute_setup_command(command)
Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315
Below is the the instruction that describes the task: ### Input: Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315 ### Response: def pypi_register(server='pypitest'): """Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315 """ base_command = 'python setup.py register' if server == 'pypitest': command = base_command + ' -r https://testpypi.python.org/pypi' else: command = base_command _execute_setup_command(command)
def requires_private_key(func):
    """
    Decorator for methods that require the private key to be defined.

    If the instance has no ``_DiffieHellman__private_key`` attribute yet,
    ``generate_private_key()`` is called first, then the wrapped method
    runs.  Unlike the previous implementation, the wrapped method's
    return value is propagated to the caller and the function metadata
    is preserved via ``functools.wraps``.
    """
    @functools.wraps(func)
    def func_wrapper(self, *args, **kwargs):
        # The mangled name is how DiffieHellman stores its
        # ``__private_key`` double-underscore attribute.
        if not hasattr(self, "_DiffieHellman__private_key"):
            self.generate_private_key()
        return func(self, *args, **kwargs)
    return func_wrapper
Decorator for functions that require the private key to be defined.
Below is the the instruction that describes the task: ### Input: Decorator for functions that require the private key to be defined. ### Response: def requires_private_key(func): """ Decorator for functions that require the private key to be defined. """ def func_wrapper(self, *args, **kwargs): if hasattr(self, "_DiffieHellman__private_key"): func(self, *args, **kwargs) else: self.generate_private_key() func(self, *args, **kwargs) return func_wrapper
def read_estimations(est_file, boundaries_id, labels_id=None, **params):
    """Reads the estimations (boundaries and/or labels) from a jams file
    containing the estimations of an algorithm.

    Parameters
    ----------
    est_file : str
        Path to the estimated file (JAMS file).
    boundaries_id : str
        Identifier of the algorithm used to compute the boundaries.
    labels_id : str
        Identifier of the algorithm used to compute the labels.
    params : dict
        Additional search parameters. E.g. {"feature" : "pcp"}.  The key
        "hier" (bool, default False) selects hierarchical output.

    Returns
    -------
    boundaries : np.array((N,2)) or list of np.array
        Estimated boundary intervals (one array per level if "hier").
    labels : np.array(N) or list of np.array
        Estimated labels (one array per level if "hier").

    Raises
    ------
    NoEstimationsError
        If the file contains no matching estimation.
    """
    # Open file and read jams
    jam = jams.load(est_file)

    # Find correct estimation
    est = find_estimation(jam, boundaries_id, labels_id, params)
    if est is None:
        raise NoEstimationsError("No estimations for file: %s" % est_file)

    # Get data values
    all_boundaries, all_labels = est.to_interval_values()
    # ``.get`` instead of ``params["hier"]``: callers that never pass
    # "hier" used to crash here with a KeyError.
    if params.get("hier", False):
        hier_bounds = defaultdict(list)
        hier_labels = defaultdict(list)
        for bounds, labels in zip(all_boundaries, all_labels):
            level = labels["level"]
            hier_bounds[level].append(bounds)
            hier_labels[level].append(labels["label"])

        # Flatten into per-level arrays, ordered by level key.
        all_boundaries = []
        all_labels = []
        for key in sorted(list(hier_bounds.keys())):
            all_boundaries.append(np.asarray(hier_bounds[key]))
            all_labels.append(np.asarray(hier_labels[key]))

    return all_boundaries, all_labels
Reads the estimations (boundaries and/or labels) from a jams file containing the estimations of an algorithm. Parameters ---------- est_file : str Path to the estimated file (JAMS file). boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- boundaries : np.array((N,2)) Array containing the estimated boundaries in intervals. labels : np.array(N) Array containing the estimated labels. Empty array if labels_id is None.
Below is the the instruction that describes the task: ### Input: Reads the estimations (boundaries and/or labels) from a jams file containing the estimations of an algorithm. Parameters ---------- est_file : str Path to the estimated file (JAMS file). boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- boundaries : np.array((N,2)) Array containing the estimated boundaries in intervals. labels : np.array(N) Array containing the estimated labels. Empty array if labels_id is None. ### Response: def read_estimations(est_file, boundaries_id, labels_id=None, **params): """Reads the estimations (boundaries and/or labels) from a jams file containing the estimations of an algorithm. Parameters ---------- est_file : str Path to the estimated file (JAMS file). boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- boundaries : np.array((N,2)) Array containing the estimated boundaries in intervals. labels : np.array(N) Array containing the estimated labels. Empty array if labels_id is None. 
""" # Open file and read jams jam = jams.load(est_file) # Find correct estimation est = find_estimation(jam, boundaries_id, labels_id, params) if est is None: raise NoEstimationsError("No estimations for file: %s" % est_file) # Get data values all_boundaries, all_labels = est.to_interval_values() if params["hier"]: hier_bounds = defaultdict(list) hier_labels = defaultdict(list) for bounds, labels in zip(all_boundaries, all_labels): level = labels["level"] hier_bounds[level].append(bounds) hier_labels[level].append(labels["label"]) # Order all_boundaries = [] all_labels = [] for key in sorted(list(hier_bounds.keys())): all_boundaries.append(np.asarray(hier_bounds[key])) all_labels.append(np.asarray(hier_labels[key])) return all_boundaries, all_labels
def binary_value_or_stdin(value):
    """
    Return *value* as bytes, reading raw data from stdin when it is None.

    On Python 3 the value is fs-encoded; on Python 2 it is returned
    unchanged.
    """
    if value is not None:
        return os.fsencode(value) if six.PY3 else value
    # closefd=False: we must not close the real stdin descriptor.
    stdin_reader = io.open(sys.stdin.fileno(), mode='rb', closefd=False)
    return stdin_reader.read()
Return fsencoded value or read raw data from stdin if value is None.
Below is the the instruction that describes the task: ### Input: Return fsencoded value or read raw data from stdin if value is None. ### Response: def binary_value_or_stdin(value): """ Return fsencoded value or read raw data from stdin if value is None. """ if value is None: reader = io.open(sys.stdin.fileno(), mode='rb', closefd=False) return reader.read() elif six.PY3: return os.fsencode(value) else: return value
def get_column_list_prefixed(self):
    """
    Return the table's columns, each prefixed with the table name.

    :returns: list of ``"<name>.<column>"`` strings.
    """
    # A real list (not a lazy ``map`` object, as before) so the result
    # can be indexed, measured and iterated more than once, as the
    # docstring promises.
    return [".".join([self.name, column]) for column in self.columns]
Returns a list of columns
Below is the the instruction that describes the task: ### Input: Returns a list of columns ### Response: def get_column_list_prefixed(self): """ Returns a list of columns """ return map( lambda x: ".".join([self.name, x]), self.columns )
def _get_policies(minion_id, config):
    '''
    Get the policies that should be applied to a token for minion_id
    '''
    _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__)
    policy_patterns = config.get(
        'policies', ['saltstack/minion/{minion}', 'saltstack/minions']
    )
    mappings = {'minion': minion_id, 'grains': grains or {}}

    policies = []
    for pattern in policy_patterns:
        # Both the expansion and the format() call may raise KeyError on
        # an unresolvable placeholder; partial results stay in ``policies``.
        try:
            for expanded in _expand_pattern_lists(pattern, **mappings):
                # Vault requires lowercase policy names.
                policies.append(expanded.format(**mappings).lower())
        except KeyError:
            log.warning('Could not resolve policy pattern %s', pattern)

    log.debug('%s policies: %s', minion_id, policies)
    return policies
Get the policies that should be applied to a token for minion_id
Below is the the instruction that describes the task: ### Input: Get the policies that should be applied to a token for minion_id ### Response: def _get_policies(minion_id, config): ''' Get the policies that should be applied to a token for minion_id ''' _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__) policy_patterns = config.get( 'policies', ['saltstack/minion/{minion}', 'saltstack/minions'] ) mappings = {'minion': minion_id, 'grains': grains or {}} policies = [] for pattern in policy_patterns: try: for expanded_pattern in _expand_pattern_lists(pattern, **mappings): policies.append( expanded_pattern.format(**mappings) .lower() # Vault requirement ) except KeyError: log.warning('Could not resolve policy pattern %s', pattern) log.debug('%s policies: %s', minion_id, policies) return policies
def show_configure(self):
    """
    Show the configuration window, or deiconify (un-minimise) it if it's already open.
    """
    logging.info("Displaying configuration window")
    # show() makes the window visible if hidden; showNormal() restores it
    # from a minimised state; activateWindow() raises it and gives it
    # focus.  All three run unconditionally so one call site covers both
    # the "first open" and the "already open but minimised" cases.
    self.configWindow.show()
    self.configWindow.showNormal()
    self.configWindow.activateWindow()
Show the configuration window, or deiconify (un-minimise) it if it's already open.
Below is the the instruction that describes the task: ### Input: Show the configuration window, or deiconify (un-minimise) it if it's already open. ### Response: def show_configure(self): """ Show the configuration window, or deiconify (un-minimise) it if it's already open. """ logging.info("Displaying configuration window") self.configWindow.show() self.configWindow.showNormal() self.configWindow.activateWindow()
def chart_maker(Int, Top, start=100, outfile='chart.txt'):
    """
    Makes a chart for performing IZZI experiments. Print out the file and
    tape it to the oven.  This chart will help keep track of the
    different steps.
       Z : performed in zero field - enter the temperature XXX.0 in the
           sio formatted measurement file created by the LabView program
       I : performed in the lab field written at the top of the form
       P : a pTRM step - performed at the temperature and in the lab field.

    Parameters
    __________
    Int : list of intervals [e.g., 50,10,5]
    Top : list of upper bounds for each interval [e.g., 500, 550, 600]
    start : first temperature step, default is 100
    outfile : name of output file, default is 'chart.txt'

    Output
    _________
    creates a file with:
        file:  write down the name of the measurement file
        field: write down the lab field for the infield steps (in uT)
        the type of step (Z: zerofield, I: infield, P: pTRM step)
        temperature of the step and code for SIO-like treatment steps
            XXX.0  [zero field]
            XXX.1  [in field]
            XXX.2  [pTRM check] - done in a lab field
        date : date the step was performed
        run # : an optional run number
        zones I-III : field in the zones in the oven
        start : time the run was started
        sp : time the setpoint was reached
        cool : time cooling started
    """
    vline = '\t%s\n' % (
        ' | | | | | | | |')
    hline = '______________________________________________________________________________\n'
    low, iz = start, 0
    Tzero = []
    # Fix: ``outfile`` was previously ignored ('chart.txt' hard-coded),
    # and the file was only closed via a bare ``except:`` that doubled
    # as the loop terminator.  ``with`` guarantees the close.
    with open(outfile, 'w') as f:
        f.write('file:_________________ field:___________uT\n\n\n')
        f.write('%s\n' % (
            ' date | run# | zone I | zone II | zone III | start | sp | cool|'))
        f.write(hline)
        f.write('\t%s' % (' 0.0'))
        f.write(vline)
        f.write(hline)
        for k in range(len(Top)):
            for t in range(low, Top[k] + Int[k], Int[k]):
                if iz == 0:
                    Tzero.append(t)  # zero field first step
                    f.write('%s \t %s' % ('Z', str(t) + '.' + str(iz)))
                    f.write(vline)
                    f.write(hline)
                    if len(Tzero) > 1:
                        # pTRM check back at the previous zero-field temp
                        f.write('%s \t %s' %
                                ('P', str(Tzero[-2]) + '.' + str(2)))
                        f.write(vline)
                        f.write(hline)
                    iz = 1  # infield after zero field first
                    f.write('%s \t %s' % ('I', str(t) + '.' + str(iz)))
                    f.write(vline)
                    f.write(hline)
                elif iz == 1:  # infield first step
                    f.write('%s \t %s' % ('I', str(t) + '.' + str(iz)))
                    f.write(vline)
                    f.write(hline)
                    iz = 0  # zero field step (after infield)
                    f.write('%s \t %s' % ('Z', str(t) + '.' + str(iz)))
                    f.write(vline)
                    f.write(hline)
            if k + 1 < len(Int):
                low = Top[k] + Int[k + 1]  # increment to next temp step
    print("output stored in: %s" % outfile)
Makes a chart for performing IZZI experiments. Print out the file and tape it to the oven. This chart will help keep track of the different steps. Z : performed in zero field - enter the temperature XXX.0 in the sio formatted measurement file created by the LabView program I : performed in the lab field written at the top of the form P : a pTRM step - performed at the temperature and in the lab field. Parameters __________ Int : list of intervals [e.g., 50,10,5] Top : list of upper bounds for each interval [e.g., 500, 550, 600] start : first temperature step, default is 100 outfile : name of output file, default is 'chart.txt' Output _________ creates a file with: file: write down the name of the measurement file field: write down the lab field for the infield steps (in uT) the type of step (Z: zerofield, I: infield, P: pTRM step temperature of the step and code for SIO-like treatment steps XXX.0 [zero field] XXX.1 [in field] XXX.2 [pTRM check] - done in a lab field date : date the step was performed run # : an optional run number zones I-III : field in the zones in the oven start : time the run was started sp : time the setpoint was reached cool : time cooling started
Below is the the instruction that describes the task: ### Input: Makes a chart for performing IZZI experiments. Print out the file and tape it to the oven. This chart will help keep track of the different steps. Z : performed in zero field - enter the temperature XXX.0 in the sio formatted measurement file created by the LabView program I : performed in the lab field written at the top of the form P : a pTRM step - performed at the temperature and in the lab field. Parameters __________ Int : list of intervals [e.g., 50,10,5] Top : list of upper bounds for each interval [e.g., 500, 550, 600] start : first temperature step, default is 100 outfile : name of output file, default is 'chart.txt' Output _________ creates a file with: file: write down the name of the measurement file field: write down the lab field for the infield steps (in uT) the type of step (Z: zerofield, I: infield, P: pTRM step temperature of the step and code for SIO-like treatment steps XXX.0 [zero field] XXX.1 [in field] XXX.2 [pTRM check] - done in a lab field date : date the step was performed run # : an optional run number zones I-III : field in the zones in the oven start : time the run was started sp : time the setpoint was reached cool : time cooling started ### Response: def chart_maker(Int, Top, start=100, outfile='chart.txt'): """ Makes a chart for performing IZZI experiments. Print out the file and tape it to the oven. This chart will help keep track of the different steps. Z : performed in zero field - enter the temperature XXX.0 in the sio formatted measurement file created by the LabView program I : performed in the lab field written at the top of the form P : a pTRM step - performed at the temperature and in the lab field. 
Parameters __________ Int : list of intervals [e.g., 50,10,5] Top : list of upper bounds for each interval [e.g., 500, 550, 600] start : first temperature step, default is 100 outfile : name of output file, default is 'chart.txt' Output _________ creates a file with: file: write down the name of the measurement file field: write down the lab field for the infield steps (in uT) the type of step (Z: zerofield, I: infield, P: pTRM step temperature of the step and code for SIO-like treatment steps XXX.0 [zero field] XXX.1 [in field] XXX.2 [pTRM check] - done in a lab field date : date the step was performed run # : an optional run number zones I-III : field in the zones in the oven start : time the run was started sp : time the setpoint was reached cool : time cooling started """ low, k, iz = start, 0, 0 Tzero = [] f = open('chart.txt', 'w') vline = '\t%s\n' % ( ' | | | | | | | |') hline = '______________________________________________________________________________\n' f.write('file:_________________ field:___________uT\n\n\n') f.write('%s\n' % ( ' date | run# | zone I | zone II | zone III | start | sp | cool|')) f.write(hline) f.write('\t%s' % (' 0.0')) f.write(vline) f.write(hline) for k in range(len(Top)): for t in range(low, Top[k]+Int[k], Int[k]): if iz == 0: Tzero.append(t) # zero field first step f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) if len(Tzero) > 1: f.write('%s \t %s' % ('P', str(Tzero[-2])+'.'+str(2))) f.write(vline) f.write(hline) iz = 1 # infield after zero field first f.write('%s \t %s' % ('I', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) # f.write('%s \t %s'%('T',str(t)+'.'+str(3))) # print second zero field (tail check) # f.write(vline) # f.write(hline) elif iz == 1: # infield first step f.write('%s \t %s' % ('I', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) iz = 0 # zero field step (after infield) f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) try: low = 
Top[k]+Int[k+1] # increment to next temp step except: f.close() print("output stored in: chart.txt")
def download(self, obj, path=None, show_progress=True, resume=True,
             auto_retry=True, proapi=False):
    """
    Download a file.

    :param obj: :class:`.File` object
    :param str path: local path
    :param bool show_progress: whether to show download progress
    :param bool resume: whether to resume on unfinished downloads
        identified by filename
    :param bool auto_retry: whether to retry automatically upon closed
        transfer until the file's download is finished
    :param bool proapi: whether to use pro API
    """
    file_url = obj.get_download_url(proapi)
    # ``download`` below resolves to the module-level helper, not this
    # method.
    download(file_url,
             path=path,
             session=self.http.session,
             show_progress=show_progress,
             resume=resume,
             auto_retry=auto_retry)
Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API
Below is the the instruction that describes the task: ### Input: Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API ### Response: def download(self, obj, path=None, show_progress=True, resume=True, auto_retry=True, proapi=False): """ Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API """ url = obj.get_download_url(proapi) download(url, path=path, session=self.http.session, show_progress=show_progress, resume=resume, auto_retry=auto_retry)
def convert_field(self, value, conversion):
    """
    Define some extra field conversion functions.

    Standard conversions (``s``, ``r``, ``a``) are delegated to
    :class:`string.Formatter`; anything it rejects is looked up in a
    table of extras (case changes, mean, variance, std-dev, sum,
    product).  An unknown conversion re-raises the original
    ``ValueError`` instead of the former ``TypeError`` from calling
    ``None``.
    """
    try:
        # If the normal behaviour works, do it.
        s = super(CustomFormatter, self)
        return s.convert_field(value, conversion)
    except ValueError:
        funcs = {'s': str,   # Default.
                 'r': repr,  # Default.
                 'a': ascii, # Default.
                 'u': str.upper,
                 'l': str.lower,
                 'c': str.capitalize,
                 't': str.title,
                 'm': np.mean,
                 'µ': np.mean,
                 'v': np.var,
                 'd': np.std,
                 '+': np.sum,
                 '∑': np.sum,
                 'x': np.prod,  # np.product was removed in NumPy 2.0
                 }
        func = funcs.get(conversion)
        if func is None:
            raise  # unknown conversion: surface the original ValueError
        return func(value)
Define some extra field conversion functions.
Below is the the instruction that describes the task: ### Input: Define some extra field conversion functions. ### Response: def convert_field(self, value, conversion): """ Define some extra field conversion functions. """ try: # If the normal behaviour works, do it. s = super(CustomFormatter, self) return s.convert_field(value, conversion) except ValueError: funcs = {'s': str, # Default. 'r': repr, # Default. 'a': ascii, # Default. 'u': str.upper, 'l': str.lower, 'c': str.capitalize, 't': str.title, 'm': np.mean, 'µ': np.mean, 'v': np.var, 'd': np.std, '+': np.sum, '∑': np.sum, 'x': np.product, } return funcs.get(conversion)(value)
def tas2eas(Vtas, H):
    """Convert True Airspeed to Equivalent Airspeed at altitude H."""
    # EAS = TAS * sqrt(rho / rho0), with rho the density at altitude H
    # and rho0 the sea-level reference density.
    return Vtas * np.sqrt(density(H) / rho0)
True Airspeed to Equivalent Airspeed
Below is the the instruction that describes the task: ### Input: True Airspeed to Equivalent Airspeed ### Response: def tas2eas(Vtas, H): """True Airspeed to Equivalent Airspeed""" rho = density(H) Veas = Vtas * np.sqrt(rho/rho0) return Veas
def _Download(campaign, subcampaign):
    '''
    Download all stars from a given campaign. This is
    called from ``missions/k2/download.pbs``

    :param campaign: K2 campaign number.
    :param subcampaign: sub-campaign index, or -1 for the whole campaign
        (a sub-campaign adds ``0.1 * subcampaign`` to the campaign id).
    '''
    # Are we doing a subcampaign?
    if subcampaign != -1:
        campaign = campaign + 0.1 * subcampaign

    # Get all star IDs for this campaign
    stars = [s[0] for s in GetK2Campaign(campaign)]
    nstars = len(stars)

    # Download the TPF data for each one
    for i, EPIC in enumerate(stars):
        print("Downloading data for EPIC %d (%d/%d)..." %
              (EPIC, i + 1, nstars))
        data_path = os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign),
                                 ('%09d' % EPIC)[:4] + '00000',
                                 ('%09d' % EPIC)[4:], 'data.npz')
        if os.path.exists(data_path):
            continue  # already downloaded
        try:
            GetData(EPIC, season=campaign, download_only=True)
        except KeyboardInterrupt:
            sys.exit()
        except Exception:
            # Some targets could be corrupted...  Previously a bare
            # ``except:`` which also swallowed SystemExit/GeneratorExit.
            print("ERROR downloading EPIC %d." % EPIC)
            exctype, value, tb = sys.exc_info()
            for line in traceback.format_exception_only(exctype, value):
                ln = line.replace('\n', '')
                print(ln)
            continue
Download all stars from a given campaign. This is called from ``missions/k2/download.pbs``
Below is the the instruction that describes the task: ### Input: Download all stars from a given campaign. This is called from ``missions/k2/download.pbs`` ### Response: def _Download(campaign, subcampaign): ''' Download all stars from a given campaign. This is called from ``missions/k2/download.pbs`` ''' # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all star IDs for this campaign stars = [s[0] for s in GetK2Campaign(campaign)] nstars = len(stars) # Download the TPF data for each one for i, EPIC in enumerate(stars): print("Downloading data for EPIC %d (%d/%d)..." % (EPIC, i + 1, nstars)) if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:], 'data.npz')): try: GetData(EPIC, season=campaign, download_only=True) except KeyboardInterrupt: sys.exit() except: # Some targets could be corrupted... print("ERROR downloading EPIC %d." % EPIC) exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') print(ln) continue
def delete_refresh_token(self, refresh_token):
    """
    Remove a refresh token (and its access token) from the cache
    after use.

    :param refresh_token: The refresh token to delete.
    """
    access_token = self.fetch_by_refresh_token(refresh_token)
    # Drop the access token entry first, then the refresh token itself.
    for token in (access_token.token, refresh_token):
        self.mc.delete(self._generate_cache_key(token))
Deletes a refresh token after use :param refresh_token: The refresh token to delete.
Below is the the instruction that describes the task: ### Input: Deletes a refresh token after use :param refresh_token: The refresh token to delete. ### Response: def delete_refresh_token(self, refresh_token): """ Deletes a refresh token after use :param refresh_token: The refresh token to delete. """ access_token = self.fetch_by_refresh_token(refresh_token) self.mc.delete(self._generate_cache_key(access_token.token)) self.mc.delete(self._generate_cache_key(refresh_token))
def mixins(self, name):
    """
    Search mixins for name, treating '>' as optional
    ('.a .b()' == '.a > .b()').

    Args:
        name (string): Search term
    Returns:
        Mixin object list OR False
    """
    # Try the exact name first; fall back to the same name with the
    # tokenized '>' ('?>?') collapsed to a space.
    return self._smixins(name) or self._smixins(name.replace('?>?', ' '))
Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False
Below is the the instruction that describes the task: ### Input: Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False ### Response: def mixins(self, name): """ Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False """ m = self._smixins(name) if m: return m return self._smixins(name.replace('?>?', ' '))
def general_setting(key, default=None, expected_type=None, qsettings=None):
    """Read one value from QSettings.

    :param key: Unique key for setting.
    :type key: basestring
    :param default: Value returned when the key is missing or an error
        occurs.
    :type default: basestring, None, boolean, int, float
    :param expected_type: The type of object expected.
    :type expected_type: type
    :param qsettings: A custom QSettings to use; the default instance is
        used when omitted.
    :type qsettings: qgis.PyQt.QtCore.QSettings
    :returns: The value of the key in the setting.
    :rtype: object

    Note:
        The API for QSettings to get a value is different for PyQt and
        Qt C++.  In PyQt we can specify the expected type.  See:
        http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
    """
    settings = qsettings if qsettings is not None else QSettings()
    try:
        if isinstance(expected_type, type):
            return settings.value(key, default, type=expected_type)
        return settings.value(key, default)
    except TypeError as e:
        LOGGER.debug('exception %s' % e)
        LOGGER.debug('%s %s %s' % (key, default, expected_type))
        return settings.value(key, default)
Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
Below is the the instruction that describes the task: ### Input: Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value ### Response: def general_setting(key, default=None, expected_type=None, qsettings=None): """Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value """ if qsettings is None: qsettings = QSettings() try: if isinstance(expected_type, type): return qsettings.value(key, default, type=expected_type) else: return qsettings.value(key, default) except TypeError as e: LOGGER.debug('exception %s' % e) LOGGER.debug('%s %s %s' % (key, default, expected_type)) return qsettings.value(key, default)
def add_slave(self, slave, container_name="widget"): """Add a slave delegate """ cont = getattr(self, container_name, None) if cont is None: raise AttributeError( 'Container name must be a member of the delegate') cont.add(slave.widget) self.slaves.append(slave) return slave
Add a slave delegate
Below is the the instruction that describes the task: ### Input: Add a slave delegate ### Response: def add_slave(self, slave, container_name="widget"): """Add a slave delegate """ cont = getattr(self, container_name, None) if cont is None: raise AttributeError( 'Container name must be a member of the delegate') cont.add(slave.widget) self.slaves.append(slave) return slave
def load_data(): '''Load dataset, use boston dataset''' boston = load_boston() X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25) #normalize data ss_X = StandardScaler() ss_y = StandardScaler() X_train = ss_X.fit_transform(X_train) X_test = ss_X.transform(X_test) y_train = ss_y.fit_transform(y_train[:, None])[:,0] y_test = ss_y.transform(y_test[:, None])[:,0] return X_train, X_test, y_train, y_test
Load dataset, use boston dataset
Below is the the instruction that describes the task: ### Input: Load dataset, use boston dataset ### Response: def load_data(): '''Load dataset, use boston dataset''' boston = load_boston() X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25) #normalize data ss_X = StandardScaler() ss_y = StandardScaler() X_train = ss_X.fit_transform(X_train) X_test = ss_X.transform(X_test) y_train = ss_y.fit_transform(y_train[:, None])[:,0] y_test = ss_y.transform(y_test[:, None])[:,0] return X_train, X_test, y_train, y_test
def time(self, t): """Convert any timestamp into a datetime and save as _time""" _time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss') self._time = datetime.datetime.strptime(_time, '%Y-%m-%dT%H:%M:%S')
Convert any timestamp into a datetime and save as _time
Below is the the instruction that describes the task: ### Input: Convert any timestamp into a datetime and save as _time ### Response: def time(self, t): """Convert any timestamp into a datetime and save as _time""" _time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss') self._time = datetime.datetime.strptime(_time, '%Y-%m-%dT%H:%M:%S')
def _uncached_match(self, text, pos, cache, error): """Return length of match, ``None`` if no match.""" m = self.re.match(text, pos) if m is not None: span = m.span() node = RegexNode(self, text, pos, pos + span[1] - span[0]) node.match = m # TODO: A terrible idea for cache size? return node
Return length of match, ``None`` if no match.
Below is the the instruction that describes the task: ### Input: Return length of match, ``None`` if no match. ### Response: def _uncached_match(self, text, pos, cache, error): """Return length of match, ``None`` if no match.""" m = self.re.match(text, pos) if m is not None: span = m.span() node = RegexNode(self, text, pos, pos + span[1] - span[0]) node.match = m # TODO: A terrible idea for cache size? return node
def moment(inlist, moment=1): """ Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r) """ if moment == 1: return 0.0 else: mn = mean(inlist) n = len(inlist) s = 0 for x in inlist: s = s + (x - mn) ** moment return s / float(n)
Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r)
Below is the the instruction that describes the task: ### Input: Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r) ### Response: def moment(inlist, moment=1): """ Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r) """ if moment == 1: return 0.0 else: mn = mean(inlist) n = len(inlist) s = 0 for x in inlist: s = s + (x - mn) ** moment return s / float(n)
def _proj_l1_scalar_root(v, gamma): r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array """ if norm_l1(v) <= gamma: return v else: av = np.abs(v) fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma t = optim.brentq(fn, 0, av.max()) return prox_l1(v, t)
r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array
Below is the the instruction that describes the task: ### Input: r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array ### Response: def _proj_l1_scalar_root(v, gamma): r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array """ if norm_l1(v) <= gamma: return v else: av = np.abs(v) fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma t = optim.brentq(fn, 0, av.max()) return prox_l1(v, t)
def knx_to_date(knxdata): """Convert a 3 byte KNX data object to a date""" if len(knxdata) != 3: raise KNXException("Can only convert a 3 Byte object to date") year = knxdata[2] if year >= 90: year += 1900 else: year += 2000 return date(year, knxdata[1], knxdata[0])
Convert a 3 byte KNX data object to a date
Below is the the instruction that describes the task: ### Input: Convert a 3 byte KNX data object to a date ### Response: def knx_to_date(knxdata): """Convert a 3 byte KNX data object to a date""" if len(knxdata) != 3: raise KNXException("Can only convert a 3 Byte object to date") year = knxdata[2] if year >= 90: year += 1900 else: year += 2000 return date(year, knxdata[1], knxdata[0])
def get_data_home(data_home=None): """Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir. """ if data_home is None: data_home = os.environ.get("ARVIZ_DATA", os.path.join("~", "arviz_data")) data_home = os.path.expanduser(data_home) if not os.path.exists(data_home): os.makedirs(data_home) return data_home
Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir.
Below is the the instruction that describes the task: ### Input: Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir. ### Response: def get_data_home(data_home=None): """Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir. """ if data_home is None: data_home = os.environ.get("ARVIZ_DATA", os.path.join("~", "arviz_data")) data_home = os.path.expanduser(data_home) if not os.path.exists(data_home): os.makedirs(data_home) return data_home
def run(self) -> None: """Thread run method implementation.""" while True: request = self.input_queue.get() response = self._handle_request(request) self.output_queue.put(response)
Thread run method implementation.
Below is the the instruction that describes the task: ### Input: Thread run method implementation. ### Response: def run(self) -> None: """Thread run method implementation.""" while True: request = self.input_queue.get() response = self._handle_request(request) self.output_queue.put(response)
def save_method_args(method): """ Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () """ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) def wrapper(self, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) return method(self, *args, **kwargs) return wrapper
Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args ()
Below is the the instruction that describes the task: ### Input: Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () ### Response: def save_method_args(method): """ Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () """ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) def wrapper(self, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) return method(self, *args, **kwargs) return wrapper
def subseparable_conv(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution. If separability == 0 it's a separable_conv.""" def conv_fn(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution, splits into separability-many blocks.""" separability = None if "separability" in kwargs: separability = kwargs.pop("separability") if separability: parts = [] abs_sep = separability if separability > 0 else -1 * separability for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): with tf.variable_scope("part_%d" % split_idx): if separability > 0: parts.append( layers().Conv2D(filters // separability, kernel_size, **kwargs)(split)) else: parts.append( layers().SeparableConv2D(filters // abs_sep, kernel_size, **kwargs)(split)) if separability > 1: result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3)) elif abs_sep == 1: # If we have just one block, return it. assert len(parts) == 1 result = parts[0] else: result = tf.concat(parts, axis=3) else: result = layers().SeparableConv2D(filters, kernel_size, **kwargs)(inputs) if separability is not None: kwargs["separability"] = separability return result return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
Sub-separable convolution. If separability == 0 it's a separable_conv.
Below is the the instruction that describes the task: ### Input: Sub-separable convolution. If separability == 0 it's a separable_conv. ### Response: def subseparable_conv(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution. If separability == 0 it's a separable_conv.""" def conv_fn(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution, splits into separability-many blocks.""" separability = None if "separability" in kwargs: separability = kwargs.pop("separability") if separability: parts = [] abs_sep = separability if separability > 0 else -1 * separability for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): with tf.variable_scope("part_%d" % split_idx): if separability > 0: parts.append( layers().Conv2D(filters // separability, kernel_size, **kwargs)(split)) else: parts.append( layers().SeparableConv2D(filters // abs_sep, kernel_size, **kwargs)(split)) if separability > 1: result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3)) elif abs_sep == 1: # If we have just one block, return it. assert len(parts) == 1 result = parts[0] else: result = tf.concat(parts, axis=3) else: result = layers().SeparableConv2D(filters, kernel_size, **kwargs)(inputs) if separability is not None: kwargs["separability"] = separability return result return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
def read_sha1( file_path, buf_size = None, start_byte = 0, read_size = None, extra_hashers = [], # update(data) will be called on all of these ): ''' Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory ''' read_size = read_size or os.stat(file_path).st_size buf_size = buf_size or DEFAULT_BUFFER_SIZE data_read = 0 total_sha1 = hashlib.sha1() while data_read < read_size: with open( file_path, 'rb', buffering = 0 ) as f: f.seek( start_byte ) data = f.read( min(buf_size, read_size - data_read) ) assert( len(data) > 0 ) total_sha1.update( data ) for hasher in extra_hashers: hasher.update( data ) data_read += len(data) start_byte += len(data) assert( data_read == read_size ) return total_sha1
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
Below is the the instruction that describes the task: ### Input: Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory ### Response: def read_sha1( file_path, buf_size = None, start_byte = 0, read_size = None, extra_hashers = [], # update(data) will be called on all of these ): ''' Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory ''' read_size = read_size or os.stat(file_path).st_size buf_size = buf_size or DEFAULT_BUFFER_SIZE data_read = 0 total_sha1 = hashlib.sha1() while data_read < read_size: with open( file_path, 'rb', buffering = 0 ) as f: f.seek( start_byte ) data = f.read( min(buf_size, read_size - data_read) ) assert( len(data) > 0 ) total_sha1.update( data ) for hasher in extra_hashers: hasher.update( data ) data_read += len(data) start_byte += len(data) assert( data_read == read_size ) return total_sha1
def labelForAction(self, action): """ Returns the label that contains the inputed action. :return <XDockActionLabel> || None """ for label in self.actionLabels(): if label.action() == action: return label return None
Returns the label that contains the inputed action. :return <XDockActionLabel> || None
Below is the the instruction that describes the task: ### Input: Returns the label that contains the inputed action. :return <XDockActionLabel> || None ### Response: def labelForAction(self, action): """ Returns the label that contains the inputed action. :return <XDockActionLabel> || None """ for label in self.actionLabels(): if label.action() == action: return label return None
def _determine_spec(self, index): """ Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback """ name, field_spec, field_params = self._fields[index] value_spec = field_spec spec_override = None if self._spec_callbacks is not None and name in self._spec_callbacks: callback = self._spec_callbacks[name] spec_override = callback(self) if spec_override: # Allow a spec callback to specify both the base spec and # the override, for situations such as OctetString and parse_as if spec_override.__class__ == tuple and len(spec_override) == 2: field_spec, value_spec = spec_override if value_spec is None: value_spec = field_spec spec_override = None # When no field spec is specified, use a single return value as that elif field_spec is None: field_spec = spec_override value_spec = field_spec spec_override = None else: value_spec = spec_override elif self._oid_nums is not None and self._oid_nums[1] == index: oid = self._lazy_child(self._oid_nums[0]).native if oid in self._oid_specs: spec_override = self._oid_specs[oid] value_spec = spec_override return (name, field_spec, value_spec, field_params, spec_override)
Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback
Below is the the instruction that describes the task: ### Input: Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback ### Response: def _determine_spec(self, index): """ Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback """ name, field_spec, field_params = self._fields[index] value_spec = field_spec spec_override = None if self._spec_callbacks is not None and name in self._spec_callbacks: callback = self._spec_callbacks[name] spec_override = callback(self) if spec_override: # Allow a spec callback to specify both the base spec and # the override, for situations such as OctetString and parse_as if spec_override.__class__ == tuple and len(spec_override) == 2: field_spec, value_spec = spec_override if value_spec is None: value_spec = field_spec spec_override = None # When no field spec is specified, use a single return value as that elif field_spec is None: field_spec = spec_override value_spec = field_spec spec_override = None else: value_spec = spec_override elif self._oid_nums is not None and self._oid_nums[1] == index: oid = self._lazy_child(self._oid_nums[0]).native if oid in self._oid_specs: spec_override = self._oid_specs[oid] value_spec = spec_override return (name, field_spec, value_spec, field_params, spec_override)
def update(self, **args): """ Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md """ # JSONify our data. data = json.dumps(args) r = requests.put( "https://kippt.com/api/clips/%s" % (self.id), headers=self.kippt.header, data=data) return (r.json())
Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md
Below is the the instruction that describes the task: ### Input: Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md ### Response: def update(self, **args): """ Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md """ # JSONify our data. data = json.dumps(args) r = requests.put( "https://kippt.com/api/clips/%s" % (self.id), headers=self.kippt.header, data=data) return (r.json())
def override_supported_formats(formats): """ Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``. """ def decorator(function): @wraps(function) def wrapper(self, *args, **kwargs): self.supported_formats = formats return function(self, *args, **kwargs) return wrapper return decorator
Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``.
Below is the the instruction that describes the task: ### Input: Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``. ### Response: def override_supported_formats(formats): """ Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``. """ def decorator(function): @wraps(function) def wrapper(self, *args, **kwargs): self.supported_formats = formats return function(self, *args, **kwargs) return wrapper return decorator
def calculate_reduced_matrix_elements(fine_states, convention=1): r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868 """ reduced_matrix_elements = [[reduced_matrix_element(ei, ej, convention=convention) for ej in fine_states] for ei in fine_states] return reduced_matrix_elements
r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868
Below is the the instruction that describes the task: ### Input: r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868 ### Response: def calculate_reduced_matrix_elements(fine_states, convention=1): r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868 """ reduced_matrix_elements = [[reduced_matrix_element(ei, ej, convention=convention) for ej in fine_states] for ei in fine_states] return reduced_matrix_elements
def delegate_method(other, method, name=None): """Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method) """ frame = sys._getframe(1) classdict = frame.f_locals @functools.wraps(method) def delegate(self, *args, **kwargs): other_self = other.__get__(self) return method(other_self, *args, **kwargs) if getattr(method, '__switchpoint__', False): delegate.__switchpoint__ = True if name is None: name = method.__name__ propname = None for key in classdict: if classdict[key] is other: propname = key break # If we know the property name, replace the docstring with a small # reference instead of copying the function docstring. if propname: qname = getattr(method, '__qualname__', method.__name__) if '.' in qname: delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \ .format(name=name, propname=propname) else: delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \ .format(name=name, propname=propname) classdict[name] = delegate
Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method)
Below is the the instruction that describes the task: ### Input: Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method) ### Response: def delegate_method(other, method, name=None): """Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. 
For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method) """ frame = sys._getframe(1) classdict = frame.f_locals @functools.wraps(method) def delegate(self, *args, **kwargs): other_self = other.__get__(self) return method(other_self, *args, **kwargs) if getattr(method, '__switchpoint__', False): delegate.__switchpoint__ = True if name is None: name = method.__name__ propname = None for key in classdict: if classdict[key] is other: propname = key break # If we know the property name, replace the docstring with a small # reference instead of copying the function docstring. if propname: qname = getattr(method, '__qualname__', method.__name__) if '.' in qname: delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \ .format(name=name, propname=propname) else: delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \ .format(name=name, propname=propname) classdict[name] = delegate
def file_client(self):
    '''
    Lazily create and return the cached file client.

    The client is built on the first call (from ``self.opts`` and
    ``self.pillar_rend``) and reused on every subsequent call.
    '''
    if self._file_client:
        return self._file_client
    self._file_client = salt.fileclient.get_file_client(
        self.opts, self.pillar_rend)
    return self._file_client
Return a file client. Instantiates on first call.
Below is the the instruction that describes the task: ### Input: Return a file client. Instantiates on first call. ### Response: def file_client(self): ''' Return a file client. Instantiates on first call. ''' if not self._file_client: self._file_client = salt.fileclient.get_file_client( self.opts, self.pillar_rend) return self._file_client
def unpack_hyperopt_vals(vals):
    """
    Return a copy of a hyperopt ``vals`` dict with list-wrapped values unpacked.

    hyperopt wraps each chosen value in a single-element list; this strips
    that wrapper.  Values that are not indexable, or that are empty
    sequences, are copied through unchanged.

    :param vals: dict
    :return: dict copy of the dictionary with unpacked values
    """
    assert isinstance(vals, dict), "Parameter must be given as dict."
    unpacked = {}
    for key, value in vals.items():
        try:
            unpacked[key] = value[0]
        except (TypeError, IndexError):
            # Not subscriptable (TypeError) or empty (IndexError): keep as-is.
            unpacked[key] = value
    return unpacked
Unpack values from a hyperopt return dictionary where values are wrapped in a list. :param vals: dict :return: dict copy of the dictionary with unpacked values
Below is the the instruction that describes the task: ### Input: Unpack values from a hyperopt return dictionary where values are wrapped in a list. :param vals: dict :return: dict copy of the dictionary with unpacked values ### Response: def unpack_hyperopt_vals(vals): """ Unpack values from a hyperopt return dictionary where values are wrapped in a list. :param vals: dict :return: dict copy of the dictionary with unpacked values """ assert isinstance(vals, dict), "Parameter must be given as dict." ret = {} for k, v in list(vals.items()): try: ret[k] = v[0] except (TypeError, IndexError): ret[k] = v return ret
def writeDefinition(self, f, name):
    """Writes the string table to a file as a C const char array.

    This writes out the string table as one single C char array for memory
    size reasons, separating the individual strings with '\0' characters.
    This way we can index directly into the string array and avoid the
    additional storage costs for the pointers to them (and potential extra
    relocations for those).

    :param f: the output stream.
    :param name: the name of the output array.
    :raises ValueError: if any table entry contains ``*/``, which would
        terminate the generated C comment early.
    """
    # Sort entries by their offset so the array is emitted in table order.
    # NOTE: dict.items() returns a view on Python 3, which has no .sort();
    # sorted() works on both Python 2 and 3.
    entries = sorted(self.table.items(), key=lambda entry: entry[1])

    # Avoid null-in-string warnings with GCC and potentially
    # overlong string constants; write everything out the long way.
    def explodeToCharArray(string):
        def toCChar(s):
            if s == "'":
                return "'\\''"
            else:
                return "'%s'" % s
        return ", ".join(map(toCChar, string))

    f.write("const char %s[] = {\n" % name)
    for (string, offset) in entries:
        if "*/" in string:
            raise ValueError("String in string table contains unexpected sequence '*/': %s" % string)
        e = explodeToCharArray(string)
        if e:
            # Reuse the already-computed expansion instead of recomputing it.
            f.write("  /* %5d - \"%s\" */ %s, '\\0',\n" % (offset, string, e))
        else:
            # Empty string: emit only the terminating NUL.
            f.write("  /* %5d - \"%s\" */ '\\0',\n" % (offset, string))
    f.write("};\n\n")
Writes the string table to a file as a C const char array. This writes out the string table as one single C char array for memory size reasons, separating the individual strings with '\0' characters. This way we can index directly into the string array and avoid the additional storage costs for the pointers to them (and potential extra relocations for those). :param f: the output stream. :param name: the name of the output array.
Below is the the instruction that describes the task: ### Input: Writes the string table to a file as a C const char array. This writes out the string table as one single C char array for memory size reasons, separating the individual strings with '\0' characters. This way we can index directly into the string array and avoid the additional storage costs for the pointers to them (and potential extra relocations for those). :param f: the output stream. :param name: the name of the output array. ### Response: def writeDefinition(self, f, name): """Writes the string table to a file as a C const char array. This writes out the string table as one single C char array for memory size reasons, separating the individual strings with '\0' characters. This way we can index directly into the string array and avoid the additional storage costs for the pointers to them (and potential extra relocations for those). :param f: the output stream. :param name: the name of the output array. """ entries = self.table.items() entries.sort(key=lambda x: x[1]) # Avoid null-in-string warnings with GCC and potentially # overlong string constants; write everything out the long way. def explodeToCharArray(string): def toCChar(s): if s == "'": return "'\\''" else: return "'%s'" % s return ", ".join(map(toCChar, string)) f.write("const char %s[] = {\n" % name) for (string, offset) in entries: if "*/" in string: raise ValueError("String in string table contains unexpected sequence '*/': %s" % string) e = explodeToCharArray(string) if e: f.write(" /* %5d - \"%s\" */ %s, '\\0',\n" % (offset, string, explodeToCharArray(string))) else: f.write(" /* %5d - \"%s\" */ '\\0',\n" % (offset, string)) f.write("};\n\n")
def remove_policy(self, index=None, params=None):
    """
    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_

    :arg index: The name of the index to remove policy on
    """
    # Build the ILM remove endpoint path, then issue the request.
    path = _make_path(index, "_ilm", "remove")
    return self.transport.perform_request("POST", path, params=params)
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_ :arg index: The name of the index to remove policy on
Below is the the instruction that describes the task: ### Input: `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_ :arg index: The name of the index to remove policy on ### Response: def remove_policy(self, index=None, params=None): """ `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_ :arg index: The name of the index to remove policy on """ return self.transport.perform_request( "POST", _make_path(index, "_ilm", "remove"), params=params )
def export_urdf(mesh,
                directory,
                scale=1.0,
                color=[0.75, 0.75, 0.75],
                **kwargs):
    """
    Convert a Trimesh object into a URDF package for physics simulation.
    This breaks the mesh into convex pieces and writes them to the same
    directory as the .urdf file.

    Parameters
    ---------
    mesh      : Trimesh object
    directory : str
                  The directory path for the URDF package
    scale     : float
                  Uniform scale written into every <mesh> tag
    color     : sequence of 3 floats
                  RGB visual color, each component in [0, 1]
    **kwargs  : forwarded to convex_decomposition

    Returns
    ---------
    mesh : Trimesh object
             Multi-body mesh containing convex decomposition
    """
    import lxml.etree as et
    # TODO: fix circular import
    from .export import export_mesh

    # Extract the save directory and the file name
    fullpath = os.path.abspath(directory)
    name = os.path.basename(fullpath)
    _, ext = os.path.splitext(name)

    # A file extension on the target means a file path was passed, not a dir.
    if ext != '':
        raise ValueError('URDF path must be a directory!')

    # Create directory if needed
    if not os.path.exists(fullpath):
        os.mkdir(fullpath)
    elif not os.path.isdir(fullpath):
        raise ValueError('URDF path must be a directory!')

    # Perform a convex decomposition
    try:
        convex_pieces = convex_decomposition(mesh, **kwargs)
        if not isinstance(convex_pieces, list):
            convex_pieces = [convex_pieces]
    except BaseException:
        # Best-effort fallback: a single convex hull still yields a valid URDF.
        log.error('problem with convex decomposition, using hull',
                  exc_info=True)
        convex_pieces = [mesh.convex_hull]

    # Get the effective density of the mesh: the pieces overlap, so their
    # summed volume exceeds the original; this factor keeps total mass right.
    effective_density = mesh.volume / sum([
        m.volume for m in convex_pieces])

    # open an XML tree
    root = et.Element('robot', name='root')

    # Loop through all pieces, adding each as a link
    prev_link_name = None
    for i, piece in enumerate(convex_pieces):
        # Save each nearly convex mesh out to a file
        piece_name = '{}_convex_piece_{}'.format(name, i)
        piece_filename = '{}.obj'.format(piece_name)
        piece_filepath = os.path.join(fullpath, piece_filename)
        export_mesh(piece, piece_filepath)

        # Set the mass properties of the piece
        piece.center_mass = mesh.center_mass
        piece.density = effective_density * mesh.density

        link_name = 'link_{}'.format(piece_name)
        geom_name = '{}'.format(piece_filename)
        # Pre-format the 3x3 inertia tensor entries as scientific-notation strings.
        I = [['{:.2E}'.format(y) for y in x]  # NOQA
             for x in piece.moment_inertia]

        # Write the link out to the XML Tree
        link = et.SubElement(root, 'link', name=link_name)

        # Inertial information
        inertial = et.SubElement(link, 'inertial')
        et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0")
        et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass))
        # Only the 6 unique entries of the symmetric inertia tensor are emitted.
        et.SubElement(
            inertial,
            'inertia',
            ixx=I[0][0],
            ixy=I[0][1],
            ixz=I[0][2],
            iyy=I[1][1],
            iyz=I[1][2],
            izz=I[2][2])

        # Visual Information
        visual = et.SubElement(link, 'visual')
        et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0")
        geometry = et.SubElement(visual, 'geometry')
        et.SubElement(geometry, 'mesh', filename=geom_name,
                      scale="{:.4E} {:.4E} {:.4E}".format(scale,
                                                          scale,
                                                          scale))
        material = et.SubElement(visual, 'material', name='')
        et.SubElement(material,
                      'color',
                      rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0],
                                                           color[1],
                                                           color[2]))

        # Collision Information (same geometry as the visual)
        collision = et.SubElement(link, 'collision')
        et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0")
        geometry = et.SubElement(collision, 'geometry')
        et.SubElement(geometry, 'mesh', filename=geom_name,
                      scale="{:.4E} {:.4E} {:.4E}".format(scale,
                                                          scale,
                                                          scale))

        # Create rigid joint to previous link, chaining all pieces into one body.
        if prev_link_name is not None:
            joint_name = '{}_joint'.format(link_name)
            joint = et.SubElement(root,
                                  'joint',
                                  name=joint_name,
                                  type='fixed')
            et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0")
            et.SubElement(joint, 'parent', link=prev_link_name)
            et.SubElement(joint, 'child', link=link_name)
        prev_link_name = link_name

    # Write URDF file
    tree = et.ElementTree(root)
    urdf_filename = '{}.urdf'.format(name)
    tree.write(os.path.join(fullpath, urdf_filename),
               pretty_print=True)

    # Write Gazebo config file (model.config describing the package)
    root = et.Element('model')
    model = et.SubElement(root, 'name')
    model.text = name
    version = et.SubElement(root, 'version')
    version.text = '1.0'
    sdf = et.SubElement(root, 'sdf', version='1.4')
    sdf.text = '{}.urdf'.format(name)
    author = et.SubElement(root, 'author')
    et.SubElement(author, 'name').text = 'trimesh {}'.format(trimesh_version)
    et.SubElement(author, 'email').text = 'blank@blank.blank'
    description = et.SubElement(root, 'description')
    description.text = name
    tree = et.ElementTree(root)
    tree.write(os.path.join(fullpath, 'model.config'))

    # Summing Trimesh objects concatenates them, so this returns one
    # multi-body mesh of all the convex pieces.
    return np.sum(convex_pieces)
Convert a Trimesh object into a URDF package for physics simulation. This breaks the mesh into convex pieces and writes them to the same directory as the .urdf file. Parameters --------- mesh : Trimesh object directory : str The directory path for the URDF package Returns --------- mesh : Trimesh object Multi-body mesh containing convex decomposition
Below is the the instruction that describes the task: ### Input: Convert a Trimesh object into a URDF package for physics simulation. This breaks the mesh into convex pieces and writes them to the same directory as the .urdf file. Parameters --------- mesh : Trimesh object directory : str The directory path for the URDF package Returns --------- mesh : Trimesh object Multi-body mesh containing convex decomposition ### Response: def export_urdf(mesh, directory, scale=1.0, color=[0.75, 0.75, 0.75], **kwargs): """ Convert a Trimesh object into a URDF package for physics simulation. This breaks the mesh into convex pieces and writes them to the same directory as the .urdf file. Parameters --------- mesh : Trimesh object directory : str The directory path for the URDF package Returns --------- mesh : Trimesh object Multi-body mesh containing convex decomposition """ import lxml.etree as et # TODO: fix circular import from .export import export_mesh # Extract the save directory and the file name fullpath = os.path.abspath(directory) name = os.path.basename(fullpath) _, ext = os.path.splitext(name) if ext != '': raise ValueError('URDF path must be a directory!') # Create directory if needed if not os.path.exists(fullpath): os.mkdir(fullpath) elif not os.path.isdir(fullpath): raise ValueError('URDF path must be a directory!') # Perform a convex decomposition try: convex_pieces = convex_decomposition(mesh, **kwargs) if not isinstance(convex_pieces, list): convex_pieces = [convex_pieces] except BaseException: log.error('problem with convex decomposition, using hull', exc_info=True) convex_pieces = [mesh.convex_hull] # Get the effective density of the mesh effective_density = mesh.volume / sum([ m.volume for m in convex_pieces]) # open an XML tree root = et.Element('robot', name='root') # Loop through all pieces, adding each as a link prev_link_name = None for i, piece in enumerate(convex_pieces): # Save each nearly convex mesh out to a file piece_name = 
'{}_convex_piece_{}'.format(name, i) piece_filename = '{}.obj'.format(piece_name) piece_filepath = os.path.join(fullpath, piece_filename) export_mesh(piece, piece_filepath) # Set the mass properties of the piece piece.center_mass = mesh.center_mass piece.density = effective_density * mesh.density link_name = 'link_{}'.format(piece_name) geom_name = '{}'.format(piece_filename) I = [['{:.2E}'.format(y) for y in x] # NOQA for x in piece.moment_inertia] # Write the link out to the XML Tree link = et.SubElement(root, 'link', name=link_name) # Inertial information inertial = et.SubElement(link, 'inertial') et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0") et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass)) et.SubElement( inertial, 'inertia', ixx=I[0][0], ixy=I[0][1], ixz=I[0][2], iyy=I[1][1], iyz=I[1][2], izz=I[2][2]) # Visual Information visual = et.SubElement(link, 'visual') et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(visual, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, scale="{:.4E} {:.4E} {:.4E}".format(scale, scale, scale)) material = et.SubElement(visual, 'material', name='') et.SubElement(material, 'color', rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0], color[1], color[2])) # Collision Information collision = et.SubElement(link, 'collision') et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0") geometry = et.SubElement(collision, 'geometry') et.SubElement(geometry, 'mesh', filename=geom_name, scale="{:.4E} {:.4E} {:.4E}".format(scale, scale, scale)) # Create rigid joint to previous link if prev_link_name is not None: joint_name = '{}_joint'.format(link_name) joint = et.SubElement(root, 'joint', name=joint_name, type='fixed') et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0") et.SubElement(joint, 'parent', link=prev_link_name) et.SubElement(joint, 'child', link=link_name) prev_link_name = link_name # Write URDF file tree = et.ElementTree(root) urdf_filename = 
'{}.urdf'.format(name) tree.write(os.path.join(fullpath, urdf_filename), pretty_print=True) # Write Gazebo config file root = et.Element('model') model = et.SubElement(root, 'name') model.text = name version = et.SubElement(root, 'version') version.text = '1.0' sdf = et.SubElement(root, 'sdf', version='1.4') sdf.text = '{}.urdf'.format(name) author = et.SubElement(root, 'author') et.SubElement(author, 'name').text = 'trimesh {}'.format(trimesh_version) et.SubElement(author, 'email').text = 'blank@blank.blank' description = et.SubElement(root, 'description') description.text = name tree = et.ElementTree(root) tree.write(os.path.join(fullpath, 'model.config')) return np.sum(convex_pieces)
def localpath(*args):
    """construct an absolute path from a list relative to the root pycapnp directory"""
    # Join the components onto ROOT, then normalize to an absolute path.
    return os.path.abspath(pjoin(ROOT, *args))
construct an absolute path from a list relative to the root pycapnp directory
Below is the instruction that describes the task:
### Input:
construct an absolute path from a list relative to the root pycapnp directory
### Response:
def localpath(*args):
    """construct an absolute path from a list relative to the root pycapnp directory"""
    plist = [ROOT] + list(args)
    return os.path.abspath(pjoin(*plist))
def format_command(
    command_args,  # type: List[str]
    command_output,  # type: str
):
    # type: (...) -> str
    """
    Format command information for logging.
    """
    parts = ['Command arguments: {}\n'.format(command_args)]
    if not command_output:
        parts.append('Command output: None')
    elif logger.getEffectiveLevel() > logging.DEBUG:
        # Output is available but suppressed unless verbose logging is on.
        parts.append('Command output: [use --verbose to show]')
    else:
        if not command_output.endswith('\n'):
            command_output += '\n'
        parts.append(
            ('Command output:\n{}'
             '-----------------------------------------').format(command_output)
        )
    return ''.join(parts)
Format command information for logging.
Below is the the instruction that describes the task: ### Input: Format command information for logging. ### Response: def format_command( command_args, # type: List[str] command_output, # type: str ): # type: (...) -> str """ Format command information for logging. """ text = 'Command arguments: {}\n'.format(command_args) if not command_output: text += 'Command output: None' elif logger.getEffectiveLevel() > logging.DEBUG: text += 'Command output: [use --verbose to show]' else: if not command_output.endswith('\n'): command_output += '\n' text += ( 'Command output:\n{}' '-----------------------------------------' ).format(command_output) return text
def connect_handler(self, their_unl, events, force_master, hairpin, nonce):
    """Negotiate and open a connection to the node described by their_unl.

    :param their_unl: the remote node's UNL (ascii-encodable string)
    :param events: optional dict of ``success``/``failure`` callbacks;
        ``None`` marks an undifferentiated duplicate request that may be
        silently dropped
    :param force_master: force this side to act as connection master
    :param hairpin: treat same-router peers as passive (no forwarding)
    :param nonce: 64-char hex nonce, or all-zeros to skip con-ID generation
    """
    # Figure out who should make the connection.
    our_unl = self.value.encode("ascii")
    their_unl = their_unl.encode("ascii")
    master = self.is_master(their_unl)

    """
    Master defines who connects if either side can. It's used to
    eliminate having multiple connections with the same host.
    """

    if force_master:
        master = 1

    # Deconstruct binary UNLs into dicts.
    our_unl = self.deconstruct(our_unl)
    their_unl = self.deconstruct(their_unl)
    if our_unl is None:
        raise Exception("Unable to deconstruct our UNL.")
    if their_unl is None:
        raise Exception("Unable to deconstruct their UNL.")

    # This means the nodes are behind the same router.
    if our_unl["wan_ip"] == their_unl["wan_ip"]:
        # Connect to LAN IP.
        our_unl["wan_ip"] = our_unl["lan_ip"]
        their_unl["wan_ip"] = their_unl["lan_ip"]

        # Already behind NAT so no forwarding needed.
        if hairpin:
            our_unl["node_type"] = "passive"
            their_unl["node_type"] = "passive"

    # Generate con ID.
    if nonce != "0" * 64:
        # Convert nonce to bytes.
        if sys.version_info >= (3, 0, 0):
            if type(nonce) == str:
                # NOTE(review): result of encode() is discarded here, so on
                # Python 3 the nonce is NOT actually converted — looks like a
                # latent bug; confirm intended behavior before "fixing".
                nonce.encode("ascii")
        else:
            if type(nonce) == unicode:
                nonce = str(nonce)

        # Check nonce length.
        assert(len(nonce) == 64)

        # Create con ID.
        con_id = self.net.generate_con_id(
            nonce,
            our_unl["wan_ip"],
            their_unl["wan_ip"]
        )
    else:
        con_id = None

    # Acquire mutex.
    self.mutex.acquire()

    # Wait for other UNLs to finish.
    # Timeout scales with queue length: one minute per pending UNL.
    end_time = time.time()
    end_time += len(self.pending_unls) * 60
    self.debug_print("Waiting for other unls to finish")
    while their_unl in self.pending_unls and time.time() < end_time:
        # This is an undifferentiated duplicate.
        if events is None:
            self.mutex.release()
            return

        time.sleep(1)
    self.debug_print("Other unl finished")

    is_exception = 0
    try:
        # Wait for any other hole punches to finish.
        if (their_unl["node_type"] == "simultaneous" and
                our_unl["node_type"] != "passive"):
            self.pending_sim_open.append(their_unl["value"])
            end_time = time.time()
            end_time += len(self.pending_unls) * 60
            self.debug_print("wait for other hole punches to finish")
            while len(self.pending_sim_open) and time.time() < end_time:
                # Our entry reached the head of the queue: our turn.
                if self.pending_sim_open[0] == their_unl["value"]:
                    break

                time.sleep(1)
            self.debug_print("other hole punches finished")

        # Set pending UNL.
        self.pending_unls.append(their_unl)

        # Release mutex.
        self.mutex.release()

        # Get connection.
        con = self.get_connection(
            our_unl,
            their_unl,
            master,
            nonce,
            force_master,
            con_id
        )
    except Exception as e:
        # NOTE(review): if this branch runs, ``con`` is never bound and the
        # events block below would raise NameError — verify against callers.
        is_exception = 1
        print(e)
        print("EXCEPTION IN UNL.GET_CONNECTION")
        log_exception("error.log", parse_exception(e))
    finally:
        # Release mutex.
        # Only re-release on the exception path; the success path already
        # released it before get_connection.
        if self.mutex.locked() and is_exception:
            self.mutex.release()

        # Undo pending connect state.
        if their_unl in self.pending_unls:
            self.pending_unls.remove(their_unl)

        # Undo pending sim open.
        if len(self.pending_sim_open):
            if self.pending_sim_open[0] == their_unl["value"]:
                self.pending_sim_open = self.pending_sim_open[1:]

    # Only execute events if this function was called manually.
    if events is not None:
        # Success.
        if con is not None:
            if "success" in events:
                events["success"](con)

        # Failure.
        if con is None:
            if "failure" in events:
                events["failure"](con)
Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host.
Below is the the instruction that describes the task: ### Input: Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host. ### Response: def connect_handler(self, their_unl, events, force_master, hairpin, nonce): # Figure out who should make the connection. our_unl = self.value.encode("ascii") their_unl = their_unl.encode("ascii") master = self.is_master(their_unl) """ Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host. """ if force_master: master = 1 # Deconstruct binary UNLs into dicts. our_unl = self.deconstruct(our_unl) their_unl = self.deconstruct(their_unl) if our_unl is None: raise Exception("Unable to deconstruct our UNL.") if their_unl is None: raise Exception("Unable to deconstruct their UNL.") # This means the nodes are behind the same router. if our_unl["wan_ip"] == their_unl["wan_ip"]: # Connect to LAN IP. our_unl["wan_ip"] = our_unl["lan_ip"] their_unl["wan_ip"] = their_unl["lan_ip"] # Already behind NAT so no forwarding needed. if hairpin: our_unl["node_type"] = "passive" their_unl["node_type"] = "passive" # Generate con ID. if nonce != "0" * 64: # Convert nonce to bytes. if sys.version_info >= (3, 0, 0): if type(nonce) == str: nonce.encode("ascii") else: if type(nonce) == unicode: nonce = str(nonce) # Check nonce length. assert(len(nonce) == 64) # Create con ID. con_id = self.net.generate_con_id( nonce, our_unl["wan_ip"], their_unl["wan_ip"] ) else: con_id = None # Acquire mutex. self.mutex.acquire() # Wait for other UNLs to finish. end_time = time.time() end_time += len(self.pending_unls) * 60 self.debug_print("Waiting for other unls to finish") while their_unl in self.pending_unls and time.time() < end_time: # This is an undifferentiated duplicate. if events is None: self.mutex.release() return time.sleep(1) self.debug_print("Other unl finished") is_exception = 0 try: # Wait for any other hole punches to finish. 
if (their_unl["node_type"] == "simultaneous" and our_unl["node_type"] != "passive"): self.pending_sim_open.append(their_unl["value"]) end_time = time.time() end_time += len(self.pending_unls) * 60 self.debug_print("wait for other hole punches to finish") while len(self.pending_sim_open) and time.time() < end_time: if self.pending_sim_open[0] == their_unl["value"]: break time.sleep(1) self.debug_print("other hole punches finished") # Set pending UNL. self.pending_unls.append(their_unl) # Release mutex. self.mutex.release() # Get connection. con = self.get_connection( our_unl, their_unl, master, nonce, force_master, con_id ) except Exception as e: is_exception = 1 print(e) print("EXCEPTION IN UNL.GET_CONNECTION") log_exception("error.log", parse_exception(e)) finally: # Release mutex. if self.mutex.locked() and is_exception: self.mutex.release() # Undo pending connect state. if their_unl in self.pending_unls: self.pending_unls.remove(their_unl) # Undo pending sim open. if len(self.pending_sim_open): if self.pending_sim_open[0] == their_unl["value"]: self.pending_sim_open = self.pending_sim_open[1:] # Only execute events if this function was called manually. if events is not None: # Success. if con is not None: if "success" in events: events["success"](con) # Failure. if con is None: if "failure" in events: events["failure"](con)
def init(self, project_name='', template='base', quiet=False, debug=False):
    '''Generate new Foliant project.

    :param project_name: project title; prompted for interactively if empty
    :param template: name of a built-in template or a path to one;
        prompted for if not found
    :param quiet: suppress console summary output
    :param debug: enable debug-level logging
    '''
    self.logger.setLevel(DEBUG if debug else WARNING)

    self.logger.info('Project creation started.')

    self.logger.debug(f'Template: {template}')

    template_path = Path(template)

    # Not a filesystem path: fall back to the templates bundled with foliant.
    if not template_path.exists():
        self.logger.debug(
            f'Template not found in {template_path}, looking in installed templates.'
        )

        installed_templates_path = Path(Path(__file__).parent / 'templates')

        installed_templates = [
            item.name for item in installed_templates_path.iterdir() if item.is_dir()
        ]

        self.logger.debug(f'Available templates: {installed_templates}')

        if template in installed_templates:
            self.logger.debug('Template found.')

        else:
            self.logger.debug('Template not found, asking for user input.')

            try:
                template = prompt(
                    f'Please pick a template from {installed_templates}: ',
                    completer=WordCompleter(installed_templates),
                    validator=BuiltinTemplateValidator(installed_templates)
                )

            except KeyboardInterrupt:
                self.logger.warning('Project creation interrupted.')
                return

        template_path = installed_templates_path / template

        self.logger.debug(f'Template path: {template_path}')

    if not project_name:
        self.logger.debug('Project name not specified, asking for user input.')

        try:
            project_name = prompt('Enter the project name: ')

        except KeyboardInterrupt:
            self.logger.warning('Project creation interrupted.')
            return

    project_slug = slugify(project_name)
    project_path = Path(project_slug)

    # Placeholder values substituted into template files and file names.
    properties = {
        'title': project_name,
        'slug': project_slug
    }

    self.logger.debug(f'Project properties: {properties}')

    result = None

    with spinner('Generating project', self.logger, quiet, debug):
        copytree(template_path, project_path)

        text_types = '*.md', '*.yml', '*.txt', '*.py'

        # Flatten the per-glob match generators into one list of text files.
        text_file_paths = reduce(
            lambda acc, matches: acc + [*matches],
            (project_path.rglob(text_type) for text_type in text_types),
            []
        )

        for text_file_path in text_file_paths:
            self.logger.debug(f'Processing content of {text_file_path}')
            replace_placeholders(text_file_path, properties)

        # NOTE(review): renames entries while iterating rglob('*'); appears to
        # rely on path enumeration order — confirm on deep template trees.
        for item in project_path.rglob('*'):
            self.logger.debug(f'Processing name of {item}')
            item.rename(Template(item.as_posix()).safe_substitute(properties))

        result = project_path

    if result:
        self.logger.info(f'Result: {result}')

        if not quiet:
            print('─' * 20)
            print(f'Project "{project_name}" created in {result}')

        else:
            print(result)

    else:
        self.logger.critical('Project creation failed.')
        exit(1)
Generate new Foliant project.
Below is the the instruction that describes the task: ### Input: Generate new Foliant project. ### Response: def init(self, project_name='', template='base', quiet=False, debug=False): '''Generate new Foliant project.''' self.logger.setLevel(DEBUG if debug else WARNING) self.logger.info('Project creation started.') self.logger.debug(f'Template: {template}') template_path = Path(template) if not template_path.exists(): self.logger.debug( f'Template not found in {template_path}, looking in installed templates.' ) installed_templates_path = Path(Path(__file__).parent / 'templates') installed_templates = [ item.name for item in installed_templates_path.iterdir() if item.is_dir() ] self.logger.debug(f'Available templates: {installed_templates}') if template in installed_templates: self.logger.debug('Template found.') else: self.logger.debug('Template not found, asking for user input.') try: template = prompt( f'Please pick a template from {installed_templates}: ', completer=WordCompleter(installed_templates), validator=BuiltinTemplateValidator(installed_templates) ) except KeyboardInterrupt: self.logger.warning('Project creation interrupted.') return template_path = installed_templates_path / template self.logger.debug(f'Template path: {template_path}') if not project_name: self.logger.debug('Project name not specified, asking for user input.') try: project_name = prompt('Enter the project name: ') except KeyboardInterrupt: self.logger.warning('Project creation interrupted.') return project_slug = slugify(project_name) project_path = Path(project_slug) properties = { 'title': project_name, 'slug': project_slug } self.logger.debug(f'Project properties: {properties}') result = None with spinner('Generating project', self.logger, quiet, debug): copytree(template_path, project_path) text_types = '*.md', '*.yml', '*.txt', '*.py' text_file_paths = reduce( lambda acc, matches: acc + [*matches], (project_path.rglob(text_type) for text_type in text_types), [] ) for 
text_file_path in text_file_paths: self.logger.debug(f'Processing content of {text_file_path}') replace_placeholders(text_file_path, properties) for item in project_path.rglob('*'): self.logger.debug(f'Processing name of {item}') item.rename(Template(item.as_posix()).safe_substitute(properties)) result = project_path if result: self.logger.info(f'Result: {result}') if not quiet: print('─' * 20) print(f'Project "{project_name}" created in {result}') else: print(result) else: self.logger.critical('Project creation failed.') exit(1)
def get_sentence_id(self, element):
    """returns the ID of the sentence the given element belongs to.

    Falls back to the prefix of the element's own ID (the part before the
    first underscore) and emits a warning when the element has no
    <sentence> ancestor.
    """
    try:
        # Use the next() builtin: the Python 2-only ``.next()`` method call
        # raised AttributeError on Python 3.
        sentence_elem = next(element.iterancestors('sentence'))
    except StopIteration:
        warnings.warn("<{}> element is not a descendant of a <sentence> "
                      "We'll try to extract the sentence ID from the "
                      "prefix of the element ID".format(element.tag))
        return self.get_element_id(element).split('_')[0]
    return self.get_element_id(sentence_elem)
returns the ID of the sentence the given element belongs to.
Below is the the instruction that describes the task: ### Input: returns the ID of the sentence the given element belongs to. ### Response: def get_sentence_id(self, element): """returns the ID of the sentence the given element belongs to.""" try: sentence_elem = element.iterancestors('sentence').next() except StopIteration as e: warnings.warn("<{}> element is not a descendant of a <sentence> " "We'll try to extract the sentence ID from the " "prefix of the element ID".format(element.tag)) return self.get_element_id(element).split('_')[0] return self.get_element_id(sentence_elem)
def post_install_postgresql():
    """example default hook for installing postgresql

    Restarts PostgreSQL 8.4, sets the ``postgres`` superuser password,
    loads the adminpack extension and, when the Django default database
    uses the psycopg2 backend, creates the project role and database.
    """
    from django.conf import settings as s
    with settings(warn_only=True):
        sudo('/etc/init.d/postgresql-8.4 restart')
        # NOTE(review): the passwords are interpolated straight into the SQL
        # string; a password containing a single quote would break or inject.
        sudo("""psql template1 -c "ALTER USER postgres with encrypted password '%s';" """ % env.password, user='postgres')
        sudo("psql -f /usr/share/postgresql/8.4/contrib/adminpack.sql", user='postgres')
        if s.DATABASES['default']['ENGINE'] == 'django.db.backends.postgresql_psycopg2':
            sudo("""psql template1 -c "CREATE ROLE %s LOGIN with encrypted password '%s';" """ % (s.DATABASES['default']['USER'], s.DATABASES['default']['PASSWORD']), user='postgres')
            sudo('createdb -T template0 -O %s %s' % (s.DATABASES['default']['USER'], s.DATABASES['default']['NAME']), user='postgres')
        # print() calls instead of Python 2 print statements, which are a
        # syntax error on Python 3; output is byte-identical on Python 2.
        print("* setup postgres user password with your '%s' password" % env.user)
        print("* imported the adminpack")
        print("Post install setup of Postgresql complete!")
example default hook for installing postgresql
Below is the the instruction that describes the task: ### Input: example default hook for installing postgresql ### Response: def post_install_postgresql(): """ example default hook for installing postgresql """ from django.conf import settings as s with settings(warn_only=True): sudo('/etc/init.d/postgresql-8.4 restart') sudo("""psql template1 -c "ALTER USER postgres with encrypted password '%s';" """% env.password, user='postgres') sudo("psql -f /usr/share/postgresql/8.4/contrib/adminpack.sql", user='postgres') if s.DATABASES['default']['ENGINE']=='django.db.backends.postgresql_psycopg2': sudo("""psql template1 -c "CREATE ROLE %s LOGIN with encrypted password '%s';" """% (s.DATABASES['default']['USER'],s.DATABASES['default']['PASSWORD']), user='postgres') sudo('createdb -T template0 -O %s %s'% (s.DATABASES['default']['USER'],s.DATABASES['default']['NAME']), user='postgres') print "* setup postgres user password with your '%s' password"% env.user print "* imported the adminpack" print "Post install setup of Postgresql complete!"
def add_hydrogens(self, formal_charges=None): """Returns a molecular graph where hydrogens are added explicitely When the bond order is unknown, it assumes bond order one. If the graph has an attribute formal_charges, this routine will take it into account when counting the number of hydrogens to be added. The returned graph will also have a formal_charges attribute. This routine only adds hydrogen atoms for a limited set of atoms from the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br. """ new_edges = list(self.edges) counter = self.num_vertices for i in range(self.num_vertices): num_elec = self.numbers[i] if formal_charges is not None: num_elec -= int(formal_charges[i]) if num_elec >= 5 and num_elec <= 9: num_hydrogen = num_elec - 10 + 8 elif num_elec >= 13 and num_elec <= 17: num_hydrogen = num_elec - 18 + 8 elif num_elec == 35: num_hydrogen = 1 else: continue if num_hydrogen > 4: num_hydrogen = 8 - num_hydrogen for n in self.neighbors[i]: bo = self.orders[self.edge_index[frozenset([i, n])]] if bo <= 0: bo = 1 num_hydrogen -= int(bo) for j in range(num_hydrogen): new_edges.append((i, counter)) counter += 1 new_numbers = np.zeros(counter, int) new_numbers[:self.num_vertices] = self.numbers new_numbers[self.num_vertices:] = 1 new_orders = np.zeros(len(new_edges), int) new_orders[:self.num_edges] = self.orders new_orders[self.num_edges:] = 1 result = MolecularGraph(new_edges, new_numbers, new_orders) return result
Returns a molecular graph where hydrogens are added explicitely When the bond order is unknown, it assumes bond order one. If the graph has an attribute formal_charges, this routine will take it into account when counting the number of hydrogens to be added. The returned graph will also have a formal_charges attribute. This routine only adds hydrogen atoms for a limited set of atoms from the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br.
Below is the instruction that describes the task: ### Input: Returns a molecular graph where hydrogens are added explicitely When the bond order is unknown, it assumes bond order one. If the graph has an attribute formal_charges, this routine will take it into account when counting the number of hydrogens to be added. The returned graph will also have a formal_charges attribute. This routine only adds hydrogen atoms for a limited set of atoms from the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br. ### Response: def add_hydrogens(self, formal_charges=None): """Returns a molecular graph where hydrogens are added explicitely When the bond order is unknown, it assumes bond order one. If the graph has an attribute formal_charges, this routine will take it into account when counting the number of hydrogens to be added. The returned graph will also have a formal_charges attribute. This routine only adds hydrogen atoms for a limited set of atoms from the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br. """ new_edges = list(self.edges) counter = self.num_vertices for i in range(self.num_vertices): num_elec = self.numbers[i] if formal_charges is not None: num_elec -= int(formal_charges[i]) if num_elec >= 5 and num_elec <= 9: num_hydrogen = num_elec - 10 + 8 elif num_elec >= 13 and num_elec <= 17: num_hydrogen = num_elec - 18 + 8 elif num_elec == 35: num_hydrogen = 1 else: continue if num_hydrogen > 4: num_hydrogen = 8 - num_hydrogen for n in self.neighbors[i]: bo = self.orders[self.edge_index[frozenset([i, n])]] if bo <= 0: bo = 1 num_hydrogen -= int(bo) for j in range(num_hydrogen): new_edges.append((i, counter)) counter += 1 new_numbers = np.zeros(counter, int) new_numbers[:self.num_vertices] = self.numbers new_numbers[self.num_vertices:] = 1 new_orders = np.zeros(len(new_edges), int) new_orders[:self.num_edges] = self.orders new_orders[self.num_edges:] = 1 result = MolecularGraph(new_edges, new_numbers, new_orders) return result
def transliterate(text,lang1_code,lang2_code): """ convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang2_code: language 2 code """ if (lang1_code in langinfo.SCRIPT_RANGES) and (lang2_code in langinfo.SCRIPT_RANGES): # if Sinhala is source, do a mapping to Devanagari first if lang1_code=='si': text=sdt.sinhala_to_devanagari(text) lang1_code='hi' # if Sinhala is target, make Devanagiri the intermediate target org_lang2_code='' if lang2_code=='si': lang2_code='hi' org_lang2_code='si' trans_lit_text=[] for c in text: newc=c offset=ord(c)-langinfo.SCRIPT_RANGES[lang1_code][0] if offset >=langinfo.COORDINATED_RANGE_START_INCLUSIVE and offset <= langinfo.COORDINATED_RANGE_END_INCLUSIVE: if lang2_code=='ta': # tamil exceptions offset=UnicodeIndicTransliterator._correct_tamil_mapping(offset) newc=py23char(langinfo.SCRIPT_RANGES[lang2_code][0]+offset) trans_lit_text.append(newc) # if Sinhala is source, do a mapping to Devanagari first if org_lang2_code=='si': return sdt.devanagari_to_sinhala(''.join(trans_lit_text)) return (''.join(trans_lit_text)) else: return text
convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang2_code: language 2 code
Below is the instruction that describes the task: ### Input: convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang2_code: language 2 code ### Response: def transliterate(text,lang1_code,lang2_code): """ convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang2_code: language 2 code """ if (lang1_code in langinfo.SCRIPT_RANGES) and (lang2_code in langinfo.SCRIPT_RANGES): # if Sinhala is source, do a mapping to Devanagari first if lang1_code=='si': text=sdt.sinhala_to_devanagari(text) lang1_code='hi' # if Sinhala is target, make Devanagiri the intermediate target org_lang2_code='' if lang2_code=='si': lang2_code='hi' org_lang2_code='si' trans_lit_text=[] for c in text: newc=c offset=ord(c)-langinfo.SCRIPT_RANGES[lang1_code][0] if offset >=langinfo.COORDINATED_RANGE_START_INCLUSIVE and offset <= langinfo.COORDINATED_RANGE_END_INCLUSIVE: if lang2_code=='ta': # tamil exceptions offset=UnicodeIndicTransliterator._correct_tamil_mapping(offset) newc=py23char(langinfo.SCRIPT_RANGES[lang2_code][0]+offset) trans_lit_text.append(newc) # if Sinhala is source, do a mapping to Devanagari first if org_lang2_code=='si': return sdt.devanagari_to_sinhala(''.join(trans_lit_text)) return (''.join(trans_lit_text)) else: return text
def replicate(self, dst_lun_id, max_time_out_of_sync, replication_name=None, replicate_existing_snaps=None, remote_system=None): """ Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session. """ return UnityReplicationSession.create( self._cli, self.get_id(), dst_lun_id, max_time_out_of_sync, name=replication_name, replicate_existing_snaps=replicate_existing_snaps, remote_system=remote_system)
Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session.
Below is the instruction that describes the task: ### Input: Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session. ### Response: def replicate(self, dst_lun_id, max_time_out_of_sync, replication_name=None, replicate_existing_snaps=None, remote_system=None): """ Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session. """ return UnityReplicationSession.create( self._cli, self.get_id(), dst_lun_id, max_time_out_of_sync, name=replication_name, replicate_existing_snaps=replicate_existing_snaps, remote_system=remote_system)
async def authenticate_redirect(self, callback_uri: str = None) -> None: """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ http = self.get_auth_http_client() response = await http.fetch( self._oauth_request_token_url(callback_uri=callback_uri) ) self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response)
Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead.
Below is the instruction that describes the task: ### Input: Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. ### Response: async def authenticate_redirect(self, callback_uri: str = None) -> None: """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead. """ http = self.get_auth_http_client() response = await http.fetch( self._oauth_request_token_url(callback_uri=callback_uri) ) self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response)
def multidict_to_dict(d): """ Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with list values :param d: a MultiDict or MultiValueDict instance :return: a dict instance """ return dict((k, v[0] if len(v) == 1 else v) for k, v in iterlists(d))
Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with list values :param d: a MultiDict or MultiValueDict instance :return: a dict instance
Below is the instruction that describes the task: ### Input: Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with list values :param d: a MultiDict or MultiValueDict instance :return: a dict instance ### Response: def multidict_to_dict(d): """ Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with list values :param d: a MultiDict or MultiValueDict instance :return: a dict instance """ return dict((k, v[0] if len(v) == 1 else v) for k, v in iterlists(d))
def estimate_umbrella_sampling( us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=None, md_dtrajs=None, kT=None, maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, estimator='wham', lag=1, dt_traj='1 step', init=None, init_maxiter=10000, init_maxerr=1.0E-8, width=None, **kwargs): r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. 
kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. 
width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. **kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or 
:class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes: """ from .util import get_umbrella_sampling_data as _get_umbrella_sampling_data # sanity checks if estimator not in ['wham', 'mbar', 'dtram', 'tram']: raise ValueError("unsupported estimator: %s" % estimator) if not isinstance(us_trajs, (list, tuple)): raise ValueError("The parameter us_trajs must be a list of numpy.ndarray objects") if not isinstance(us_centers, (list, tuple)): raise ValueError( "The parameter us_centers must be a list of floats or numpy.ndarray objects") if not isinstance(us_force_constants, (list, tuple)): raise ValueError( "The parameter us_force_constants must be a list of floats or numpy.ndarray objects") if len(us_trajs) != len(us_centers): raise ValueError("Unmatching number of umbrella sampling trajectories and centers: %d!=%d" \ % (len(us_trajs), len(us_centers))) if len(us_trajs) != len(us_force_constants): raise ValueError( "Unmatching number of umbrella sampling trajectories and force constants: %d!=%d" \ % (len(us_trajs), len(us_force_constants))) if len(us_trajs) != len(us_dtrajs): raise ValueError( "Number of continuous and discrete umbrella sampling trajectories does not " + \ "match: %d!=%d" % (len(us_trajs), len(us_dtrajs))) i = 0 for traj, dtraj in zip(us_trajs, us_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete umbrella sampling trajectories with " + \ "index %d does not match: 
%d!=%d" % (i, len(us_trajs), len(us_dtrajs))) i += 1 if md_trajs is not None: if not isinstance(md_trajs, (list, tuple)): raise ValueError("The parameter md_trajs must be a list of numpy.ndarray objects") if md_dtrajs is None: raise ValueError("You have provided md_trajs, but md_dtrajs is None") if md_dtrajs is None: md_dtrajs = [] else: if md_trajs is None: raise ValueError("You have provided md_dtrajs, but md_trajs is None") if len(md_trajs) != len(md_dtrajs): raise ValueError( "Number of continuous and discrete unbiased trajectories does not " + \ "match: %d!=%d" % (len(md_trajs), len(md_dtrajs))) i = 0 for traj, dtraj in zip(md_trajs, md_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete unbiased trajectories with " + \ "index %d does not match: %d!=%d" % (i, len(md_trajs), len(md_dtrajs))) i += 1 # data preparation ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state = _get_umbrella_sampling_data( us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT, width=width) estimator_obj = None # estimation if estimator == 'wham': estimator_obj = wham( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj) elif estimator == 'mbar': allowed_keys = ['direct_space'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = mbar( ttrajs, us_dtrajs + md_dtrajs, btrajs, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, **parsed_kwargs) elif estimator == 'dtram': allowed_keys = ['count_mode', 'connectivity'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = dtram( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, 
save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) elif estimator == 'tram': allowed_keys = [ 'count_mode', 'connectivity', 'connectivity_factor','nn', 'direct_space', 'N_dtram_accelerations', 'equilibrium', 'overcounting_factor', 'callback'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = tram( ttrajs, us_dtrajs + md_dtrajs, btrajs, lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) # adding thermodynamic state information and return results try: estimator_obj.umbrella_centers = umbrella_centers estimator_obj.force_constants = force_constants except AttributeError: for obj in estimator_obj: obj.umbrella_centers = umbrella_centers obj.force_constants = force_constants return estimator_obj
r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. 
maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. 
**kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. 
rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes:
Below is the instruction that describes the task: ### Input: r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. 
maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. 
**kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. 
rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes: ### Response: def estimate_umbrella_sampling( us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=None, md_dtrajs=None, kT=None, maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, estimator='wham', lag=1, dt_traj='1 step', init=None, init_maxiter=10000, init_maxerr=1.0E-8, width=None, **kwargs): r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. 
md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. 
Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. **kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. 
We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. 
autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes: """ from .util import get_umbrella_sampling_data as _get_umbrella_sampling_data # sanity checks if estimator not in ['wham', 'mbar', 'dtram', 'tram']: raise ValueError("unsupported estimator: %s" % estimator) if not isinstance(us_trajs, (list, tuple)): raise ValueError("The parameter us_trajs must be a list of numpy.ndarray objects") if not isinstance(us_centers, (list, tuple)): raise ValueError( "The parameter us_centers must be a list of floats or numpy.ndarray objects") if not isinstance(us_force_constants, (list, tuple)): raise ValueError( "The parameter us_force_constants must be a list of floats or numpy.ndarray objects") if len(us_trajs) != len(us_centers): raise ValueError("Unmatching number of umbrella sampling trajectories and centers: %d!=%d" \ % (len(us_trajs), len(us_centers))) if len(us_trajs) != len(us_force_constants): raise ValueError( "Unmatching number of umbrella sampling trajectories and force constants: %d!=%d" \ % (len(us_trajs), len(us_force_constants))) if len(us_trajs) != len(us_dtrajs): raise ValueError( "Number of continuous and discrete umbrella sampling trajectories does not " + \ "match: %d!=%d" % (len(us_trajs), len(us_dtrajs))) i = 0 for traj, dtraj in zip(us_trajs, us_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete umbrella sampling trajectories with " + \ "index %d does not match: %d!=%d" % (i, len(us_trajs), len(us_dtrajs))) i += 1 if md_trajs is not None: if not isinstance(md_trajs, (list, tuple)): raise ValueError("The parameter md_trajs must be a list of numpy.ndarray objects") if md_dtrajs is None: raise ValueError("You have provided md_trajs, but md_dtrajs is None") if md_dtrajs is None: md_dtrajs = [] else: if md_trajs is None: raise ValueError("You have provided md_dtrajs, but md_trajs is None") if len(md_trajs) != len(md_dtrajs): raise ValueError( "Number of continuous and discrete unbiased trajectories does 
not " + \ "match: %d!=%d" % (len(md_trajs), len(md_dtrajs))) i = 0 for traj, dtraj in zip(md_trajs, md_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete unbiased trajectories with " + \ "index %d does not match: %d!=%d" % (i, len(md_trajs), len(md_dtrajs))) i += 1 # data preparation ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state = _get_umbrella_sampling_data( us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT, width=width) estimator_obj = None # estimation if estimator == 'wham': estimator_obj = wham( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj) elif estimator == 'mbar': allowed_keys = ['direct_space'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = mbar( ttrajs, us_dtrajs + md_dtrajs, btrajs, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, **parsed_kwargs) elif estimator == 'dtram': allowed_keys = ['count_mode', 'connectivity'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = dtram( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) elif estimator == 'tram': allowed_keys = [ 'count_mode', 'connectivity', 'connectivity_factor','nn', 'direct_space', 'N_dtram_accelerations', 'equilibrium', 'overcounting_factor', 'callback'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = tram( ttrajs, us_dtrajs + md_dtrajs, btrajs, lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, 
dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) # adding thermodynamic state information and return results try: estimator_obj.umbrella_centers = umbrella_centers estimator_obj.force_constants = force_constants except AttributeError: for obj in estimator_obj: obj.umbrella_centers = umbrella_centers obj.force_constants = force_constants return estimator_obj
def convertPossibleValues(val, possibleValues, invalidDefault, emptyValue=''):
    '''
        convertPossibleValues - Normalize an input value against a fixed set of
            allowed values, with configurable handling of empty and invalid input.

        @param val <None/str> - The input value

        @param possibleValues list<str> - All values considered valid (lowercase)

        @param invalidDefault <None/str/Exception> - Result used when "val" is
          neither empty nor found in #possibleValues:

            If an instantiated Exception (like ValueError('blah')): raise it
            If an Exception type (like ValueError): instantiate and raise it
            Otherwise: return this raw value

        @param emptyValue - Returned for an empty input (None or empty string).
          Default ''. Pass EMPTY_IS_INVALID to treat empty input as invalid.
    '''
    from .utils import tostr

    # None is handled up front, before any string conversion
    if val is None:
        return _handleInvalid(invalidDefault) if emptyValue is EMPTY_IS_INVALID else emptyValue

    # Normalize to a lowercase string for comparison against possibleValues
    val = tostr(val).lower()

    # An empty string gets the same treatment as None
    if not val:
        return _handleInvalid(invalidDefault) if emptyValue is EMPTY_IS_INVALID else emptyValue

    # Anything outside the allowed set is invalid
    if val not in possibleValues:
        return _handleInvalid(invalidDefault)

    return val
convertPossibleValues - Convert input value to one of several possible values, with a default for invalid entries @param val <None/str> - The input value @param possibleValues list<str> - A list of possible values @param invalidDefault <None/str/Exception> - The value to return if "val" is not empty string/None and "val" is not in #possibleValues If instantiated Exception (like ValueError('blah')): Raise this exception If an Exception type ( like ValueError ) - Instantiate and raise this exception type Otherwise, use this raw value @param emptyValue Default '', used for an empty value (empty string or None)
Below is the the instruction that describes the task: ### Input: convertPossibleValues - Convert input value to one of several possible values, with a default for invalid entries @param val <None/str> - The input value @param possibleValues list<str> - A list of possible values @param invalidDefault <None/str/Exception> - The value to return if "val" is not empty string/None and "val" is not in #possibleValues If instantiated Exception (like ValueError('blah')): Raise this exception If an Exception type ( like ValueError ) - Instantiate and raise this exception type Otherwise, use this raw value @param emptyValue Default '', used for an empty value (empty string or None) ### Response: def convertPossibleValues(val, possibleValues, invalidDefault, emptyValue=''): ''' convertPossibleValues - Convert input value to one of several possible values, with a default for invalid entries @param val <None/str> - The input value @param possibleValues list<str> - A list of possible values @param invalidDefault <None/str/Exception> - The value to return if "val" is not empty string/None and "val" is not in #possibleValues If instantiated Exception (like ValueError('blah')): Raise this exception If an Exception type ( like ValueError ) - Instantiate and raise this exception type Otherwise, use this raw value @param emptyValue Default '', used for an empty value (empty string or None) ''' from .utils import tostr # If null, retain null if val is None: if emptyValue is EMPTY_IS_INVALID: return _handleInvalid(invalidDefault) return emptyValue # Convert to a string val = tostr(val).lower() # If empty string, same as null if val == '': if emptyValue is EMPTY_IS_INVALID: return _handleInvalid(invalidDefault) return emptyValue # Check if this is a valid value if val not in possibleValues: return _handleInvalid(invalidDefault) return val
def lex(self, text):
    """Tokenize the given text, yielding tokens followed by the end token.

    Args:
        text (str): text to parse

    Yields:
        Token: the tokens generated from the given text.

    Raises:
        LexerError: if a character matches no token rule and is not a
            blank character.
    """
    offset = 0
    remaining = text
    while remaining:
        token_class, match = self.tokens.get_token(remaining)
        if token_class is None:
            # No rule matched: silently consume blanks, fail on anything else.
            if remaining[0] not in self.blank_chars:
                raise LexerError(
                    'Invalid character %s in %s' % (remaining[0], remaining),
                    position=offset)
            remaining = remaining[1:]
            offset += 1
        else:
            consumed = match.end()
            yield token_class(remaining[match.start():consumed])
            remaining = remaining[consumed:]
            offset += consumed
    yield self.end_token()
Split self.text into a list of tokens. Args: text (str): text to parse Yields: Token: the tokens generated from the given text.
Below is the the instruction that describes the task: ### Input: Split self.text into a list of tokens. Args: text (str): text to parse Yields: Token: the tokens generated from the given text. ### Response: def lex(self, text): """Split self.text into a list of tokens. Args: text (str): text to parse Yields: Token: the tokens generated from the given text. """ pos = 0 while text: token_class, match = self.tokens.get_token(text) if token_class is not None: matched_text = text[match.start():match.end()] yield token_class(matched_text) text = text[match.end():] pos += match.end() elif text[0] in self.blank_chars: text = text[1:] pos += 1 else: raise LexerError( 'Invalid character %s in %s' % (text[0], text), position=pos) yield self.end_token()
def _parse_target(target):
    """Parse a binary targeting information structure.

    Only simple targets are supported: the controller, or a specific slot
    number.  Any other (complex) match operation raises an ArgumentError.

    Args:
        target (bytes): The binary targeting data blob, exactly 8 bytes.

    Returns:
        dict: The parsed targeting data with keys 'controller' (bool) and
            'slot' (int).

    Raises:
        ArgumentError: if the blob is not 8 bytes long or specifies a
            complex match operation.
    """
    if len(target) != 8:
        raise ArgumentError("Invalid targeting data length", expected=8, length=len(target))

    # Byte 0 holds the slot number, byte 7 the match operation; bytes 1-6 are padding.
    slot_number, operation = struct.unpack("<B6xB", target)

    if operation == _MATCH_CONTROLLER:
        return {'controller': True, 'slot': 0}

    if operation == _MATCH_SLOT:
        return {'controller': False, 'slot': slot_number}

    raise ArgumentError("Unsupported complex targeting specified", match_op=operation)
Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data
Below is the the instruction that describes the task: ### Input: Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data ### Response: def _parse_target(target): """Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data """ if len(target) != 8: raise ArgumentError("Invalid targeting data length", expected=8, length=len(target)) slot, match_op = struct.unpack("<B6xB", target) if match_op == _MATCH_CONTROLLER: return {'controller': True, 'slot': 0} elif match_op == _MATCH_SLOT: return {'controller': False, 'slot': slot} raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None):
    """ Provides a json dump of the struct

    Args:
        struct: The data to dump
        wrap (bool, optional): Specify whether to wrap the
            struct in an enclosing dict
        struct_key (str, optional): The string key which will
            contain the struct in the result dict
        meta (dict, optional): An optional dictonary to merge
            with the output dictionary.

    Examples:
        >>> jsoned([3,4,5])
        ... '{"status": "success", "result": [3, 4, 5]}'
        >>> jsoned([3,4,5], wrap=False)
        ... '[3, 4, 5]'
    """
    # Build the (optionally wrapped) structure first, then serialize it with
    # the shared encoder so non-native types are handled uniformly.
    payload = structured(
        struct,
        wrap=wrap,
        meta=meta,
        struct_key=struct_key,
        pre_render_callback=pre_render_callback)
    return _json.dumps(payload, default=json_encoder)
Provides a json dump of the struct Args: struct: The data to dump wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict struct_key (str, optional): The string key which will contain the struct in the result dict meta (dict, optional): An optional dictonary to merge with the output dictionary. Examples: >>> jsoned([3,4,5]) ... '{"status": "success", "result": [3, 4, 5]}' >>> jsoned([3,4,5], wrap=False) ... '[3, 4, 5]'
Below is the the instruction that describes the task: ### Input: Provides a json dump of the struct Args: struct: The data to dump wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict struct_key (str, optional): The string key which will contain the struct in the result dict meta (dict, optional): An optional dictonary to merge with the output dictionary. Examples: >>> jsoned([3,4,5]) ... '{"status": "success", "result": [3, 4, 5]}' >>> jsoned([3,4,5], wrap=False) ... '[3, 4, 5]' ### Response: def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None): """ Provides a json dump of the struct Args: struct: The data to dump wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict struct_key (str, optional): The string key which will contain the struct in the result dict meta (dict, optional): An optional dictonary to merge with the output dictionary. Examples: >>> jsoned([3,4,5]) ... '{"status": "success", "result": [3, 4, 5]}' >>> jsoned([3,4,5], wrap=False) ... '[3, 4, 5]' """ return _json.dumps( structured( struct, wrap=wrap, meta=meta, struct_key=struct_key, pre_render_callback=pre_render_callback), default=json_encoder)