repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
klen/adrest
adrest/views.py
https://github.com/klen/adrest/blob/8b75c67123cffabe5ed98c222bb7ab43c904d89c/adrest/views.py#L165-L199
def check_owners(self, request, **resources):
    """Check that the parents of the current resource own it.

    Recursive scanning of the fact that the child has FK to the parent
    and in resources we have right objects. For a request like
    /author/1/book/2/page/3 we check that the Page object with pk=3 has a
    ForeignKey field linked to the Book object with pk=2 and that Book with
    pk=2 has a ForeignKey field linked to the Author object with pk=1.

    :param request: the current HTTP request
    :param resources: mapping of resource name to resolved object(s)
    :return bool: True on success, else raise Exception
    :raises HttpError: 403 when the parent-children relationship is broken
    """
    # Public resources and resources without a parent need no ownership check.
    if self._meta.allow_public_access or not self._meta.parent:
        return True

    # Validate the whole ancestry first.
    self.parent.check_owners(request, **resources)

    objects = resources.get(self._meta.name)
    if self._meta.model and self._meta.parent._meta.model and objects:
        parent_name = self._meta.parent._meta.name
        pr = resources.get(parent_name)
        # Test `pr` *before* dereferencing pr.pk: the original evaluated
        # `pr.pk` first, so a missing parent raised AttributeError instead
        # of the intended 403 response.
        if not pr or not all(
                pr.pk == getattr(o, "%s_id" % parent_name, None)
                for o in as_tuple(objects)):
            # 403 Error if there is error in parent-children relationship
            raise HttpError(
                "Access forbidden.", status=status.HTTP_403_FORBIDDEN)

    return True
[ "def", "check_owners", "(", "self", ",", "request", ",", "*", "*", "resources", ")", ":", "if", "self", ".", "_meta", ".", "allow_public_access", "or", "not", "self", ".", "_meta", ".", "parent", ":", "return", "True", "self", ".", "parent", ".", "chec...
Check parents of current resource. Recursive scanning of the fact that the child has FK to the parent and in resources we have right objects. We check that in request like /author/1/book/2/page/3 Page object with pk=3 has ForeignKey field linked to Book object with pk=2 and Book with pk=2 has ForeignKey field linked to Author object with pk=1. :return bool: If success else raise Exception
[ "Check", "parents", "of", "current", "resource", "." ]
python
train
35.4
LEMS/pylems
lems/parser/LEMS.py
https://github.com/LEMS/pylems/blob/4eeb719d2f23650fe16c38626663b69b5c83818b/lems/parser/LEMS.py#L908-L935
def parse_exposure(self, node):
    """Parses <Exposure>

    @param node: Node containing the <Exposure> element
    @type node: xml.etree.Element

    @raise ParseError: Raised when the exposure name is not being defined
    in the context of a component type, or when a required attribute is
    missing.
    """
    # `is None` is the idiomatic identity test (the original used `== None`).
    if self.current_component_type is None:
        self.raise_error('Exposures must be defined in a component type')

    # Catch only the missing-key case; a bare `except:` would also mask
    # unrelated bugs (e.g. a typo in `lattrib`).
    try:
        name = node.lattrib['name']
    except KeyError:
        self.raise_error('<Exposure> must specify a name')

    try:
        dimension = node.lattrib['dimension']
    except KeyError:
        self.raise_error("Exposure '{0}' must specify a dimension", name)

    description = node.lattrib.get('description', '')

    self.current_component_type.add_exposure(
        Exposure(name, dimension, description))
[ "def", "parse_exposure", "(", "self", ",", "node", ")", ":", "if", "self", ".", "current_component_type", "==", "None", ":", "self", ".", "raise_error", "(", "'Exposures must be defined in a component type'", ")", "try", ":", "name", "=", "node", ".", "lattrib",...
Parses <Exposure> @param node: Node containing the <Exposure> element @type node: xml.etree.Element @raise ParseError: Raised when the exposure name is not being defined in the context of a component type.
[ "Parses", "<Exposure", ">" ]
python
train
31.142857
python-openxml/python-docx
docx/parts/styles.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/parts/styles.py#L45-L55
def _default_styles_xml(cls):
    """
    Return a bytestream containing XML for a default styles part.
    """
    # The template lives one level above this module, in templates/.
    module_dir = os.path.split(__file__)[0]
    template_path = os.path.join(
        module_dir, '..', 'templates', 'default-styles.xml'
    )
    with open(template_path, 'rb') as template_file:
        return template_file.read()
[ "def", "_default_styles_xml", "(", "cls", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "split", "(", "__file__", ")", "[", "0", "]", ",", "'..'", ",", "'templates'", ",", "'default-styles.xml'", ")", "with", "op...
Return a bytestream containing XML for a default styles part.
[ "Return", "a", "bytestream", "containing", "XML", "for", "a", "default", "styles", "part", "." ]
python
train
30.727273
learningequality/ricecooker
ricecooker/config.py
https://github.com/learningequality/ricecooker/blob/2f0385282500cb77ef2894646c6f9ce11bd7a853/ricecooker/config.py#L194-L203
def get_restore_path(filename):
    """get_restore_path: returns path to file in the restoration-point directory

    Creates the restore directory if it does not already exist.

    Args:
        filename (str): Name of file to store (without extension)
    Returns:
        string path to file (a ``.pickle`` inside the restore directory)
    """
    path = os.path.join(RESTORE_DIRECTORY, FILE_STORE_LOCATION)
    # exist_ok avoids the check-then-create race of the original
    # os.path.exists() / os.makedirs() pair.
    os.makedirs(path, exist_ok=True)
    return os.path.join(path, filename + '.pickle')
[ "def", "get_restore_path", "(", "filename", ")", ":", "path", "=", "os", ".", "path", ".", "join", "(", "RESTORE_DIRECTORY", ",", "FILE_STORE_LOCATION", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "path", ")", ":", "os", ".", "makedirs", "...
get_restore_path: returns path to directory for restoration points Args: filename (str): Name of file to store Returns: string path to file
[ "get_restore_path", ":", "returns", "path", "to", "directory", "for", "restoration", "points", "Args", ":", "filename", "(", "str", ")", ":", "Name", "of", "file", "to", "store", "Returns", ":", "string", "path", "to", "file" ]
python
train
38.1
dwavesystems/dwave-cloud-client
dwave/cloud/utils.py
https://github.com/dwavesystems/dwave-cloud-client/blob/df3221a8385dc0c04d7b4d84f740bf3ad6706230/dwave/cloud/utils.py#L139-L141
def strip_tail(sequence, values):
    """Strip `values` from the end of `sequence`."""
    # Reverse, strip the (now leading) values, then restore the order.
    stripped = list(strip_head(reversed(sequence), values))
    stripped.reverse()
    return stripped
[ "def", "strip_tail", "(", "sequence", ",", "values", ")", ":", "return", "list", "(", "reversed", "(", "list", "(", "strip_head", "(", "reversed", "(", "sequence", ")", ",", "values", ")", ")", ")", ")" ]
Strip `values` from the end of `sequence`.
[ "Strip", "values", "from", "the", "end", "of", "sequence", "." ]
python
train
52
pyblish/pyblish-qml
pyblish_qml/host.py
https://github.com/pyblish/pyblish-qml/blob/6095d18b2ec0afd0409a9b1a17e53b0658887283/pyblish_qml/host.py#L516-L529
def _install_maya(use_threaded_wrapper):
    """Helper function to Autodesk Maya support"""
    from maya import utils, cmds

    def threaded_wrapper(func, *args, **kwargs):
        # Maya requires UI-touching work to run on its main thread.
        return utils.executeInMainThreadWithResult(
            func, *args, **kwargs)

    sys.stdout.write("Setting up Pyblish QML in Maya\n")

    # Maya 2018 ships a conflicting googleapiclient; strip it first.
    maya_version = cmds.about(version=True)
    if maya_version == "2018":
        _remove_googleapiclient()

    _common_setup("Maya", threaded_wrapper, use_threaded_wrapper)
[ "def", "_install_maya", "(", "use_threaded_wrapper", ")", ":", "from", "maya", "import", "utils", ",", "cmds", "def", "threaded_wrapper", "(", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "utils", ".", "executeInMainThreadWithResult",...
Helper function to Autodesk Maya support
[ "Helper", "function", "to", "Autodesk", "Maya", "support" ]
python
train
32.214286
watson-developer-cloud/python-sdk
ibm_watson/compare_comply_v1.py
https://github.com/watson-developer-cloud/python-sdk/blob/4c2c9df4466fcde88975da9ecd834e6ba95eb353/ibm_watson/compare_comply_v1.py#L3563-L3575
def _from_dict(cls, _dict):
    """Initialize a LeadingSentence object from a json dictionary."""
    args = {}
    if 'text' in _dict:
        args['text'] = _dict['text']
    if 'location' in _dict:
        args['location'] = Location._from_dict(_dict['location'])
    if 'element_locations' in _dict:
        raw_locations = _dict['element_locations']
        args['element_locations'] = [
            ElementLocations._from_dict(item) for item in raw_locations
        ]
    return cls(**args)
[ "def", "_from_dict", "(", "cls", ",", "_dict", ")", ":", "args", "=", "{", "}", "if", "'text'", "in", "_dict", ":", "args", "[", "'text'", "]", "=", "_dict", ".", "get", "(", "'text'", ")", "if", "'location'", "in", "_dict", ":", "args", "[", "'l...
Initialize a LeadingSentence object from a json dictionary.
[ "Initialize", "a", "LeadingSentence", "object", "from", "a", "json", "dictionary", "." ]
python
train
39.615385
pysathq/pysat
solvers/prepare.py
https://github.com/pysathq/pysat/blob/522742e8f2d4c6ac50ecd9087f7a346206774c67/solvers/prepare.py#L340-L375
def extract_archive(archive, solver, put_inside = False):
    """
    Unzips/untars a previously downloaded archive file.

    :param archive: path to a ``.tar.gz`` or ``.zip`` archive
    :param solver: solver name; contents end up in ``solvers/<solver>``
    :param put_inside: if True, extract directly into ``solvers/<solver>``
        instead of extracting beside it and renaming afterwards
    """
    print('extracting {0}'.format(archive))
    root = os.path.join('solvers', solver if put_inside else '')

    if archive.endswith('.tar.gz'):
        if os.path.exists(archive[:-7]):
            shutil.rmtree(archive[:-7])

        # Context manager closes the tar file even on error; the original
        # never closed this handle at all.
        with tarfile.open(archive, 'r:gz') as tfile:
            tfile.extractall(root)

            # normally, directory should be the first name
            # but glucose4.1 has some garbage in the archive
            for name in tfile.getnames():
                if not name.startswith('./.'):
                    directory = name
                    break
    elif archive.endswith('.zip'):
        if os.path.exists(archive[:-4]):
            shutil.rmtree(archive[:-4])

        # Context manager replaces the manual close(), which the original
        # skipped whenever extractall() raised.
        with zipfile.ZipFile(archive, 'r') as myzip:
            myzip.extractall(root)
            directory = myzip.namelist()[0]
            directory = directory.rstrip('/').split('/')[0]

    if not put_inside:
        # Replace any stale copy of the solver directory with the fresh one.
        if os.path.exists(os.path.join('solvers', solver)):
            shutil.rmtree(os.path.join('solvers', solver))

        shutil.move(os.path.join('solvers', directory),
                    os.path.join('solvers', solver))
[ "def", "extract_archive", "(", "archive", ",", "solver", ",", "put_inside", "=", "False", ")", ":", "print", "(", "'extracting {0}'", ".", "format", "(", "archive", ")", ")", "root", "=", "os", ".", "path", ".", "join", "(", "'solvers'", ",", "solver", ...
Unzips/untars a previously downloaded archive file.
[ "Unzips", "/", "untars", "a", "previously", "downloaded", "archive", "file", "." ]
python
train
33.277778
ThreatConnect-Inc/tcex
tcex/tcex_bin_package.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex_bin_package.py#L110-L132
def bundle_apps(self, bundle_name, bundle_apps):
    """Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file.

    Args:
        bundle_name (str): The output name of the bundle zip file.
        bundle_apps (list): A list of Apps to include in the bundle.
    """
    bundle_file = os.path.join(
        self.app_path, self.args.outdir, '{}-bundle.zip'.format(bundle_name)
    )
    # Context manager guarantees the zip is closed even if a write fails;
    # the original only closed it on the happy path.
    with zipfile.ZipFile(bundle_file, 'w') as z:
        for app in bundle_apps:
            # update package data
            self.package_data['bundle'].append(
                {'action': 'Adding App:', 'output': os.path.basename(app)}
            )
            z.write(app, os.path.basename(app))

    # update package data
    self.package_data['bundle'].append(
        {'action': 'Created Bundle:', 'output': os.path.basename(bundle_file)}
    )
[ "def", "bundle_apps", "(", "self", ",", "bundle_name", ",", "bundle_apps", ")", ":", "bundle_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "app_path", ",", "self", ".", "args", ".", "outdir", ",", "'{}-bundle.zip'", ".", "format", "(", ...
Bundle multiple Job or Playbook Apps (.tcx files) into a single zip file. Args: bundle_name (str): The output name of the bundle zip file. bundle_apps (list): A list of Apps to include in the bundle.
[ "Bundle", "multiple", "Job", "or", "Playbook", "Apps", "(", ".", "tcx", "files", ")", "into", "a", "single", "zip", "file", "." ]
python
train
38.782609
ismms-himc/clustergrammer2
clustergrammer2/clustergrammer_fun/__init__.py
https://github.com/ismms-himc/clustergrammer2/blob/5acea9bff7eda546cf0647b9e3647f631eb6f5f5/clustergrammer2/clustergrammer_fun/__init__.py#L583-L663
def sim_same_and_diff_category_samples(self, df, cat_index=1, dist_type='cosine',
                                       equal_var=False, plot_roc=True,
                                       precalc_dist=False, calc_roc=True):
    '''
    Calculate the similarity of samples from the same and different categories.
    The cat_index gives the index of the category, where 1 in the first category.

    :param df: DataFrame with samples as columns; column labels are tuples
        whose element at ``cat_index`` names the category.
    :param cat_index: index into each column label that holds the category.
    :param dist_type: metric passed to ``scipy.spatial.distance.pdist``.
    :param equal_var: forwarded to ``scipy.stats.ttest_ind``.
    :param plot_roc: if True (and calc_roc), draw the ROC curve with matplotlib.
    :param precalc_dist: pass a precomputed condensed similarity array here to
        skip the pdist call (a bool value means "not precalculated").
    :param calc_roc: if True, compute ROC/AUC for same-vs-different separation.
    :return: dict with keys 'sim_dict', 'pval_dict', 'roc_data'.
    '''
    cols = df.columns.tolist()
    if type(precalc_dist) == bool:
        # compute distance between rows (transpose to get cols as rows);
        # 1 - distance turns the pdist output into a similarity
        dist_arr = 1 - pdist(df.transpose(), metric=dist_type)
    else:
        dist_arr = precalc_dist
    # generate sample names with categories; pdist's condensed order matches
    # itertools.combinations over the column indices
    sample_combos = list(combinations(range(df.shape[1]), 2))
    sample_names = [str(ind) + '_same'
                    if cols[x[0]][cat_index] == cols[x[1]][cat_index]
                    else str(ind) + '_different'
                    for ind, x in enumerate(sample_combos)]
    ser_dist = pd.Series(data=dist_arr, index=sample_names)
    # find same-cat sample comparisons
    same_cat = [x for x in sample_names if x.split('_')[1] == 'same']
    # find diff-cat sample comparisons
    diff_cat = [x for x in sample_names if x.split('_')[1] == 'different']
    # make series of same and diff category sample comparisons
    ser_same = ser_dist[same_cat]
    ser_same.name = 'Same Category'
    ser_diff = ser_dist[diff_cat]
    ser_diff.name = 'Different Category'
    sim_dict = {}
    roc_data = {}
    sim_data = {}
    sim_dict['same'] = ser_same
    sim_dict['diff'] = ser_diff
    pval_dict = {}
    # two significance tests of "different" vs "same" similarity distributions
    ttest_stat, pval_dict['ttest'] = ttest_ind(ser_diff, ser_same,
                                               equal_var=equal_var)
    ttest_stat, pval_dict['mannwhitney'] = mannwhitneyu(ser_diff, ser_same)
    if calc_roc:
        # calc AUC: label same-category pairs 1, different-category pairs 0
        true_index = list(np.ones(sim_dict['same'].shape[0]))
        false_index = list(np.zeros(sim_dict['diff'].shape[0]))
        y_true = true_index + false_index
        true_val = list(sim_dict['same'].get_values())
        false_val = list(sim_dict['diff'].get_values())
        y_score = true_val + false_val
        fpr, tpr, thresholds = roc_curve(y_true, y_score)
        inst_auc = auc(fpr, tpr)
        if plot_roc:
            plt.figure()
            plt.plot(fpr, tpr)
            # chance diagonal for reference
            plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
            plt.figure(figsize=(10, 10))
            print('AUC', inst_auc)
        roc_data['true'] = y_true
        roc_data['score'] = y_score
        roc_data['fpr'] = fpr
        roc_data['tpr'] = tpr
        roc_data['thresholds'] = thresholds
        roc_data['auc'] = inst_auc
    sim_data['sim_dict'] = sim_dict
    sim_data['pval_dict'] = pval_dict
    # NOTE(review): 'roc_data' is an empty dict when calc_roc is False
    sim_data['roc_data'] = roc_data
    return sim_data
[ "def", "sim_same_and_diff_category_samples", "(", "self", ",", "df", ",", "cat_index", "=", "1", ",", "dist_type", "=", "'cosine'", ",", "equal_var", "=", "False", ",", "plot_roc", "=", "True", ",", "precalc_dist", "=", "False", ",", "calc_roc", "=", "True",...
Calculate the similarity of samples from the same and different categories. The cat_index gives the index of the category, where 1 in the first category
[ "Calculate", "the", "similarity", "of", "samples", "from", "the", "same", "and", "different", "categories", ".", "The", "cat_index", "gives", "the", "index", "of", "the", "category", "where", "1", "in", "the", "first", "category" ]
python
train
34.123457
materialsproject/pymatgen
pymatgen/core/lattice.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/lattice.py#L456-L465
def reciprocal_lattice(self) -> "Lattice":
    """
    Return the reciprocal lattice. Note that this is the standard
    reciprocal lattice used for solid state physics with a factor of
    2 * pi. If you are looking for the crystallographic reciprocal
    lattice, use the reciprocal_lattice_crystallographic property.
    The property is lazily generated for efficiency.
    """
    # inv(A).T gives the reciprocal basis vectors (without the 2*pi factor).
    recip_matrix = np.linalg.inv(self._matrix).T
    return Lattice(recip_matrix * 2 * np.pi)
[ "def", "reciprocal_lattice", "(", "self", ")", "->", "\"Lattice\"", ":", "v", "=", "np", ".", "linalg", ".", "inv", "(", "self", ".", "_matrix", ")", ".", "T", "return", "Lattice", "(", "v", "*", "2", "*", "np", ".", "pi", ")" ]
Return the reciprocal lattice. Note that this is the standard reciprocal lattice used for solid state physics with a factor of 2 * pi. If you are looking for the crystallographic reciprocal lattice, use the reciprocal_lattice_crystallographic property. The property is lazily generated for efficiency.
[ "Return", "the", "reciprocal", "lattice", ".", "Note", "that", "this", "is", "the", "standard", "reciprocal", "lattice", "used", "for", "solid", "state", "physics", "with", "a", "factor", "of", "2", "*", "pi", ".", "If", "you", "are", "looking", "for", "...
python
train
47.9
marrabld/planarradpy
libplanarradpy/planrad.py
https://github.com/marrabld/planarradpy/blob/5095d1cb98d4f67a7c3108c9282f2d59253e89a8/libplanarradpy/planrad.py#L1014-L1087
def calc_directional_aop(self, report, parameter, parameter_dir):
    """
    Will calculate the directional AOP (only sub-surface rrs for now) if the
    direction is defined using @ e.g. rrs@32.0:45 where
    <zenith-theta>:<azimuth-phi>

    :param report: The planarrad report dictionary. should include the
        quadtables and the directional info
    :param parameter: parameter to calc. Currently only sub-surface
        reflectance rrs (or above-surface Rrs).
    :param parameter_dir: direction string "<zenith>:<azimuth>" in degrees
        (presumably — TODO confirm units against caller).
    :return: comma-separated string of per-band values, prefixed with " ,"
    """
    lg.debug('calculating the directional ' + parameter)
    tmp_zenith = []
    param_zenith = parameter_dir.split(':')[0]
    param_azimuth = parameter_dir.split(':')[1]
    # --------------------------------------------------#
    # find the mean directions values
    # --------------------------------------------------#
    # vn/hn are the vertical/horizontal quad counts stored as the report's
    # second field; theta values come one row per quad, comma-separated
    for i_iter in range(0, int(report['vn'][1])):
        tmp_zenith.append(
            report['Quad_solid_angle_mean_point_theta'][i_iter][:].split(',')[0])
    # that was a pain!
    tmp_azimuth = report['Quad_solid_angle_mean_point_phi'][1]
    zenith = scipy.asarray(tmp_zenith, dtype=float)
    azimuth = scipy.fromstring(tmp_azimuth, dtype=float, sep=',')
    # --------------------------------------------------#
    # now grab the min and max index of the closest match
    # --------------------------------------------------#
    # min_zenith_idx = (scipy.abs(zenith - param_zenith)).argmin()
    from scipy import interpolate
    lw = scipy.zeros(int(report['band_count'][1]))
    for j_iter in range(0, int(report['band_count'][1])):
        if parameter == 'rrs':
            lg.info('Calculating directional rrs')
            # water-leaving radiance for this band
            tmp_lw = report['L_w_band_' + str(j_iter + 1)]
        elif parameter == 'Rrs':
            lg.info('Calculating directional Rrs')
            print(report.keys())
            tmp_lw = report['L_it_band_' + str(j_iter + 1)]
        lw_scal = scipy.zeros((int(report['vn'][1]), int(report['hn'][1])))
        # for the first and last line we have to replicate the top and
        # bottom circle (polar caps have a single value per azimuth)
        for i_iter in range(0, int(report['hn'][1])):
            lw_scal[0, i_iter] = tmp_lw[0].split(',')[0]
            lw_scal[int(report['vn'][1]) - 1, i_iter] = tmp_lw[-1].split(',')[0]
        for i_iter in range(1, int(report['vn'][1]) - 1):
            lw_scal[i_iter, :] = scipy.asarray(tmp_lw[i_iter].split(','),
                                               dtype=float)
        # to do, make an array of zeros and loop over each list an apply to
        # each line. bruteforce
        # interpolate the radiance grid at the requested direction
        f1 = interpolate.interp2d(zenith, azimuth, lw_scal)
        lw[j_iter] = f1(float(param_zenith), float(param_azimuth))
    # ----
    # Now we finally have L_w we calculate the rrs
    # ----
    if parameter == 'rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_w'], dtype=float)[1:]  # ignore the first val as that is depth of val
    elif parameter == 'Rrs':
        tmp_rrs = lw / scipy.asarray(report['Ed_a'], dtype=float)[1:]  # ignore the first val as that is depth of val
    # make rrs a string so it can be written to file.
    rrs = ",".join(map(str, tmp_rrs))
    return " ," + rrs
[ "def", "calc_directional_aop", "(", "self", ",", "report", ",", "parameter", ",", "parameter_dir", ")", ":", "lg", ".", "debug", "(", "'calculating the directional '", "+", "parameter", ")", "tmp_zenith", "=", "[", "]", "param_zenith", "=", "parameter_dir", ".",...
Will calcuate the directional AOP (only sub-surface rrs for now) if the direction is defined using @ e.g. rrs@32.0:45 where <zenith-theta>:<azimuth-phi> :param report: The planarrad report dictionary. should include the quadtables and the directional info :param parameter: parameter to calc. Currently only sub-surface reflectance rrs. :return:
[ "Will", "calcuate", "the", "directional", "AOP", "(", "only", "sub", "-", "surface", "rrs", "for", "now", ")", "if", "the", "direction", "is", "defined", "using", "@", "e", ".", "g", ".", "rrs@32", ".", "0", ":", "45", "where", "<zenith", "-", "theta...
python
test
43
vmlaker/mpipe
src/Stage.py
https://github.com/vmlaker/mpipe/blob/5a1804cf64271931f0cd3e4fff3e2b38291212dd/src/Stage.py#L84-L104
def build(self):
    """Create and start up the internal workers."""
    # A stage at the end of a fork has not been linked to any downstream
    # stage, so it has no output tube yet; create a single one for it.
    if not self._output_tubes:
        tube_class = self._worker_class.getTubeClass()
        self._output_tubes.append(tube_class())

    self._worker_class.assemble(
        self._worker_args,
        self._input_tube,
        self._output_tubes,
        self._size,
        self._disable_result,
        self._do_stop_task,
    )

    # Recursively build every downstream stage.
    for downstream in self._next_stages:
        downstream.build()
[ "def", "build", "(", "self", ")", ":", "# If there's no output tube, it means that this stage", "# is at the end of a fork (hasn't been linked to any stage downstream).", "# Therefore, create one output tube.", "if", "not", "self", ".", "_output_tubes", ":", "self", ".", "_output_t...
Create and start up the internal workers.
[ "Create", "and", "start", "up", "the", "internal", "workers", "." ]
python
train
32.904762
saltstack/salt
salt/proxy/onyx.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/proxy/onyx.py#L447-L460
def find(pattern):
    '''
    Find all instances where the pattern is in the running command

    .. code-block:: bash

        salt '*' onyx.cmd find '^snmp-server.*$'

    .. note::
        This uses the `re.MULTILINE` regex format for python, and runs the
        regex against the whole show_run output.
    '''
    running_config = show_run()
    return re.findall(pattern, running_config, re.MULTILINE)
[ "def", "find", "(", "pattern", ")", ":", "matcher", "=", "re", ".", "compile", "(", "pattern", ",", "re", ".", "MULTILINE", ")", "return", "matcher", ".", "findall", "(", "show_run", "(", ")", ")" ]
Find all instances where the pattern is in the running command .. code-block:: bash salt '*' onyx.cmd find '^snmp-server.*$' .. note:: This uses the `re.MULTILINE` regex format for python, and runs the regex against the whole show_run output.
[ "Find", "all", "instances", "where", "the", "pattern", "is", "in", "the", "running", "command" ]
python
train
27.857143
PyCQA/pylint
pylint/checkers/classes.py
https://github.com/PyCQA/pylint/blob/2bf5c61a3ff6ae90613b81679de42c0f19aea600/pylint/checkers/classes.py#L1123-L1136
def visit_attribute(self, node):
    """check if the getattr is an access to a class member
    if so, register it. Also check for access to protected
    class member from outside its class (but ignore __special__
    methods)
    """
    # Access through the mandatory method parameter (self/cls): record it.
    if self._uses_mandatory_method_param(node):
        self._accessed.set_accessed(node)
    elif self.linter.is_message_enabled("protected-access"):
        # Outside access: run the protected-member check only when the
        # corresponding message is enabled.
        self._check_protected_attribute_access(node)
[ "def", "visit_attribute", "(", "self", ",", "node", ")", ":", "# Check self", "if", "self", ".", "_uses_mandatory_method_param", "(", "node", ")", ":", "self", ".", "_accessed", ".", "set_accessed", "(", "node", ")", "return", "if", "not", "self", ".", "li...
check if the getattr is an access to a class member if so, register it. Also check for access to protected class member from outside its class (but ignore __special__ methods)
[ "check", "if", "the", "getattr", "is", "an", "access", "to", "a", "class", "member", "if", "so", "register", "it", ".", "Also", "check", "for", "access", "to", "protected", "class", "member", "from", "outside", "its", "class", "(", "but", "ignore", "__sp...
python
test
37.142857
tensorlayer/tensorlayer
tensorlayer/prepro.py
https://github.com/tensorlayer/tensorlayer/blob/aa9e52e36c7058a7e6fd81d36563ca6850b21956/tensorlayer/prepro.py#L631-L705
def projective_transform_by_points(
        x, src, dst, map_args=None, output_shape=None, order=1,
        mode='constant', cval=0.0, clip=True, preserve_range=False
):
    """Projective transform by given coordinates, usually 4 coordinates.

    see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__.

    Parameters
    -----------
    x : numpy.array
        An image with dimension of [row, col, channel] (default).
    src : list or numpy
        The original coordinates, usually 4 coordinates of (width, height).
    dst : list or numpy
        The coordinates after transformation, the number of coordinates is
        the same with src.
    map_args : dictionary or None
        Keyword arguments passed to inverse map.
    output_shape : tuple of 2 int
        Shape of the output image generated. By default the shape of the
        input image is preserved. Note that, even for multi-band images,
        only rows and columns need to be specified.
    order : int
        The order of interpolation. The order has to be in the range 0-5:
            - 0 Nearest-neighbor
            - 1 Bi-linear (default)
            - 2 Bi-quadratic
            - 3 Bi-cubic
            - 4 Bi-quartic
            - 5 Bi-quintic
    mode : str
        One of `constant` (default), `edge`, `symmetric`, `reflect` or
        `wrap`. Points outside the boundaries of the input are filled
        according to the given mode. Modes match the behaviour of numpy.pad.
    cval : float
        Used in conjunction with mode `constant`, the value outside the
        image boundaries.
    clip : boolean
        Whether to clip the output to the range of values of the input
        image. This is enabled by default, since higher order interpolation
        may produce values outside the given input range.
    preserve_range : boolean
        Whether to keep the original range of values. Otherwise, the input
        image is converted according to the conventions of img_as_float.

    Returns
    -------
    numpy.array
        A processed image.

    Examples
    --------
    Assume X is an image from CIFAR-10, i.e. shape == (32, 32, 3)

    >>> src = [[0,0],[0,32],[32,0],[32,32]]     # [w, h]
    >>> dst = [[10,10],[0,32],[32,0],[32,32]]
    >>> x = tl.prepro.projective_transform_by_points(X, src, dst)

    References
    -----------
    - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__
    - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__

    """
    # avoid a mutable default argument for the keyword dict
    if map_args is None:
        map_args = {}
    # if type(src) is list:
    if isinstance(src, list):
        # convert to numpy
        src = np.array(src)
    # if type(dst) is list:
    if isinstance(dst, list):
        dst = np.array(dst)
    if np.max(x) > 1:
        # convert to [0, 1]; assumes integer pixel values in [0, 255] —
        # TODO confirm against callers
        x = x / 255

    # estimate the homography mapping dst -> src (warp uses the inverse map)
    m = transform.ProjectiveTransform()
    m.estimate(dst, src)
    warped = transform.warp(
        x, m, map_args=map_args, output_shape=output_shape, order=order,
        mode=mode, cval=cval, clip=clip, preserve_range=preserve_range
    )
    return warped
[ "def", "projective_transform_by_points", "(", "x", ",", "src", ",", "dst", ",", "map_args", "=", "None", ",", "output_shape", "=", "None", ",", "order", "=", "1", ",", "mode", "=", "'constant'", ",", "cval", "=", "0.0", ",", "clip", "=", "True", ",", ...
Projective transform by given coordinates, usually 4 coordinates. see `scikit-image <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__. Parameters ----------- x : numpy.array An image with dimension of [row, col, channel] (default). src : list or numpy The original coordinates, usually 4 coordinates of (width, height). dst : list or numpy The coordinates after transformation, the number of coordinates is the same with src. map_args : dictionary or None Keyword arguments passed to inverse map. output_shape : tuple of 2 int Shape of the output image generated. By default the shape of the input image is preserved. Note that, even for multi-band images, only rows and columns need to be specified. order : int The order of interpolation. The order has to be in the range 0-5: - 0 Nearest-neighbor - 1 Bi-linear (default) - 2 Bi-quadratic - 3 Bi-cubic - 4 Bi-quartic - 5 Bi-quintic mode : str One of `constant` (default), `edge`, `symmetric`, `reflect` or `wrap`. Points outside the boundaries of the input are filled according to the given mode. Modes match the behaviour of numpy.pad. cval : float Used in conjunction with mode `constant`, the value outside the image boundaries. clip : boolean Whether to clip the output to the range of values of the input image. This is enabled by default, since higher order interpolation may produce values outside the given input range. preserve_range : boolean Whether to keep the original range of values. Otherwise, the input image is converted according to the conventions of img_as_float. Returns ------- numpy.array A processed image. Examples -------- Assume X is an image from CIFAR-10, i.e. 
shape == (32, 32, 3) >>> src = [[0,0],[0,32],[32,0],[32,32]] # [w, h] >>> dst = [[10,10],[0,32],[32,0],[32,32]] >>> x = tl.prepro.projective_transform_by_points(X, src, dst) References ----------- - `scikit-image : geometric transformations <http://scikit-image.org/docs/dev/auto_examples/applications/plot_geometric.html>`__ - `scikit-image : examples <http://scikit-image.org/docs/dev/auto_examples/index.html>`__
[ "Projective", "transform", "by", "given", "coordinates", "usually", "4", "coordinates", "." ]
python
valid
40.693333
nickmckay/LiPD-utilities
Python/lipd/directory.py
https://github.com/nickmckay/LiPD-utilities/blob/5dab6bbeffc5effd68e3a6beaca6b76aa928e860/Python/lipd/directory.py#L85-L112
def browse_dialog_file():
    """
    Open up a GUI browse dialog window and let the user select one or more files

    :return str _path: Target directory path
    :return list _files: List of selected files
    """
    logger_directory.info("enter browse_dialog")
    # We make files a list, because the user can multi-select files.
    _files = []
    _path = ""
    try:
        # the dialog runs as a subprocess; its stdout carries the selection
        _go_to_package()
        _path_bytes = subprocess.check_output(['python', 'gui_file_browse.py'])
        # NOTE(review): _fix_path_bytes appears to return a list of selected
        # paths — the IndexError below fires when that list is empty
        _path = _fix_path_bytes(_path_bytes)
        _files = [i for i in _path]
        # collapse the selection to the common parent directory
        _path = os.path.dirname(_path[0])
        logger_directory.info("chosen path: {}, chosen file: {}".format(_path, _files))
    except IndexError:
        # user closed the dialog without choosing anything
        logger_directory.warn("directory: browse_dialog_file: IndexError: no file chosen")
    except Exception as e:
        logger_directory.error("directory: browse_dialog_file: UnknownError: {}".format(e))
    logger_directory.info("exit browse_dialog_file")
    return _path, _files
[ "def", "browse_dialog_file", "(", ")", ":", "logger_directory", ".", "info", "(", "\"enter browse_dialog\"", ")", "# We make files a list, because the user can multi-select files.", "_files", "=", "[", "]", "_path", "=", "\"\"", "try", ":", "_go_to_package", "(", ")", ...
Open up a GUI browse dialog window and let to user select one or more files :return str _path: Target directory path :return list _files: List of selected files
[ "Open", "up", "a", "GUI", "browse", "dialog", "window", "and", "let", "to", "user", "select", "one", "or", "more", "files", ":", "return", "str", "_path", ":", "Target", "directory", "path", ":", "return", "list", "_files", ":", "List", "of", "selected",...
python
train
34.892857
learningequality/morango
morango/models.py
https://github.com/learningequality/morango/blob/c3ec2554b026f65ac5f0fc5c9d439277fbac14f9/morango/models.py#L549-L555
def deserialize(cls, dict_model):
    """Returns an unsaved class object based on the valid properties passed in."""
    # Keep only the keys that correspond to concrete model fields.
    field_values = {
        field.attname: dict_model[field.attname]
        for field in cls._meta.concrete_fields
        if field.attname in dict_model
    }
    return cls(**field_values)
[ "def", "deserialize", "(", "cls", ",", "dict_model", ")", ":", "kwargs", "=", "{", "}", "for", "f", "in", "cls", ".", "_meta", ".", "concrete_fields", ":", "if", "f", ".", "attname", "in", "dict_model", ":", "kwargs", "[", "f", ".", "attname", "]", ...
Returns an unsaved class object based on the valid properties passed in.
[ "Returns", "an", "unsaved", "class", "object", "based", "on", "the", "valid", "properties", "passed", "in", "." ]
python
valid
43.571429
kstaniek/condoor
condoor/connection.py
https://github.com/kstaniek/condoor/blob/77c054b29d4e286c1d7aca2c74dff86b805e1fae/condoor/connection.py#L100-L111
def finalize(self):
    """Clean up the object.

    After calling this method the object can't be used anymore.
    This will be reworked when changing the logging model.
    """
    self.pause_session_logging()
    self._disable_logging()
    # Drop every registered callback so nothing fires after teardown.
    for callback_attr in ('_msg_callback', '_error_msg_callback',
                          '_warning_msg_callback', '_info_msg_callback'):
        setattr(self, callback_attr, None)
[ "def", "finalize", "(", "self", ")", ":", "self", ".", "pause_session_logging", "(", ")", "self", ".", "_disable_logging", "(", ")", "self", ".", "_msg_callback", "=", "None", "self", ".", "_error_msg_callback", "=", "None", "self", ".", "_warning_msg_callback...
Clean up the object. After calling this method the object can't be used anymore. This will be reworked when changing the logging model.
[ "Clean", "up", "the", "object", "." ]
python
train
34
gtaylor/python-colormath
colormath/color_diff_matrix.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_diff_matrix.py#L11-L17
def delta_e_cie1976(lab_color_vector, lab_color_matrix): """ Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`. """ return numpy.sqrt( numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
[ "def", "delta_e_cie1976", "(", "lab_color_vector", ",", "lab_color_matrix", ")", ":", "return", "numpy", ".", "sqrt", "(", "numpy", ".", "sum", "(", "numpy", ".", "power", "(", "lab_color_vector", "-", "lab_color_matrix", ",", "2", ")", ",", "axis", "=", "...
Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`.
[ "Calculates", "the", "Delta", "E", "(", "CIE1976", ")", "between", "lab_color_vector", "and", "all", "colors", "in", "lab_color_matrix", "." ]
python
train
39.285714
ajenhl/tacl
tacl/__main__.py
https://github.com/ajenhl/tacl/blob/b8a343248e77f1c07a5a4ac133a9ad6e0b4781c2/tacl/__main__.py#L457-L481
def highlight_text(args, parser): """Outputs the result of highlighting a text.""" tokenizer = utils.get_tokenizer(args) corpus = utils.get_corpus(args) output_dir = os.path.abspath(args.output) if os.path.exists(output_dir): parser.exit(status=3, message='Output directory already exists, ' 'aborting.\n') os.makedirs(output_dir, exist_ok=True) if args.ngrams: if args.label is None or len(args.label) != len(args.ngrams): parser.error('There must be as many labels as there are files ' 'of n-grams') report = tacl.NgramHighlightReport(corpus, tokenizer) ngrams = [] for ngram_file in args.ngrams: ngrams.append(utils.get_ngrams(ngram_file)) minus_ngrams = [] if args.minus_ngrams: minus_ngrams = utils.get_ngrams(args.minus_ngrams) report.generate(args.output, args.base_name, ngrams, args.label, minus_ngrams) else: report = tacl.ResultsHighlightReport(corpus, tokenizer) report.generate(args.output, args.base_name, args.results)
[ "def", "highlight_text", "(", "args", ",", "parser", ")", ":", "tokenizer", "=", "utils", ".", "get_tokenizer", "(", "args", ")", "corpus", "=", "utils", ".", "get_corpus", "(", "args", ")", "output_dir", "=", "os", ".", "path", ".", "abspath", "(", "a...
Outputs the result of highlighting a text.
[ "Outputs", "the", "result", "of", "highlighting", "a", "text", "." ]
python
train
45.04
makinacorpus/django-tracking-fields
tracking_fields/decorators.py
https://github.com/makinacorpus/django-tracking-fields/blob/463313d0f9c0f8107a0413f4d418d1a8c2311981/tracking_fields/decorators.py#L35-L71
def _track_class_related_field(cls, field): """ Track a field on a related model """ # field = field on current model # related_field = field on related model (field, related_field) = field.split('__', 1) field_obj = cls._meta.get_field(field) related_cls = field_obj.remote_field.model related_name = field_obj.remote_field.get_accessor_name() if not hasattr(related_cls, '_tracked_related_fields'): setattr(related_cls, '_tracked_related_fields', {}) if related_field not in related_cls._tracked_related_fields.keys(): related_cls._tracked_related_fields[related_field] = [] # There can be several field from different or same model # related to a single model. # Thus _tracked_related_fields will be of the form: # { # 'field name on related model': [ # ('field name on current model', 'field name to current model'), # ('field name on another model', 'field name to another model'), # ... # ], # ... # } related_cls._tracked_related_fields[related_field].append( (field, related_name) ) _add_signals_to_cls(related_cls) # Detect m2m fields changes if isinstance(related_cls._meta.get_field(related_field), ManyToManyField): m2m_changed.connect( tracking_m2m, sender=getattr(related_cls, related_field).through, dispatch_uid=repr(related_cls), )
[ "def", "_track_class_related_field", "(", "cls", ",", "field", ")", ":", "# field = field on current model", "# related_field = field on related model", "(", "field", ",", "related_field", ")", "=", "field", ".", "split", "(", "'__'", ",", "1", ")", "field_obj", "="...
Track a field on a related model
[ "Track", "a", "field", "on", "a", "related", "model" ]
python
train
38.567568
markovmodel/msmtools
msmtools/analysis/sparse/fingerprints.py
https://github.com/markovmodel/msmtools/blob/54dc76dd2113a0e8f3d15d5316abab41402941be/msmtools/analysis/sparse/fingerprints.py#L339-L380
def relaxation_decomp(P, p0, obs, times=[1], k=None, ncv=None): r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times """ R, D, L = rdl_decomposition(P, k=k, ncv=ncv) """Extract eigenvalues""" ev = np.diagonal(D) """Amplitudes""" amplitudes = np.dot(p0, R) * np.dot(L, obs) """Propgate eigenvalues""" times = np.asarray(times) ev_t = ev[np.newaxis, :] ** times[:, np.newaxis] """Compute result""" res = np.dot(ev_t, amplitudes) """Truncate imgainary part - is zero anyways""" res = res.real return res
[ "def", "relaxation_decomp", "(", "P", ",", "p0", ",", "obs", ",", "times", "=", "[", "1", "]", ",", "k", "=", "None", ",", "ncv", "=", "None", ")", ":", "R", ",", "D", ",", "L", "=", "rdl_decomposition", "(", "P", ",", "k", "=", "k", ",", "...
r"""Relaxation experiment. The relaxation experiment describes the time-evolution of an expectation value starting in a non-equilibrium situation. Parameters ---------- P : (M, M) ndarray Transition matrix p0 : (M,) ndarray (optional) Initial distribution for a relaxation experiment obs : (M,) ndarray Observable, represented as vector on state space times : list of int (optional) List of times at which to compute expectation k : int (optional) Number of eigenvalues and amplitudes to use for computation ncv : int (optional) The number of Lanczos vectors generated, `ncv` must be greater than k; it is recommended that ncv > 2*k Returns ------- res : ndarray Array of expectation value at given times
[ "r", "Relaxation", "experiment", "." ]
python
train
30.833333
cloudera/cm_api
python/src/cm_api/endpoints/users.py
https://github.com/cloudera/cm_api/blob/5d2512375bd94684b4da36df9e0d9177865ffcbb/python/src/cm_api/endpoints/users.py#L21-L30
def get_all_users(resource_root, view=None): """ Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects. """ return call(resource_root.get, USERS_PATH, ApiUser, True, params=view and dict(view=view) or None)
[ "def", "get_all_users", "(", "resource_root", ",", "view", "=", "None", ")", ":", "return", "call", "(", "resource_root", ".", "get", ",", "USERS_PATH", ",", "ApiUser", ",", "True", ",", "params", "=", "view", "and", "dict", "(", "view", "=", "view", "...
Get all users. @param resource_root: The root Resource object @param view: View to materialize ('full' or 'summary'). @return: A list of ApiUser objects.
[ "Get", "all", "users", "." ]
python
train
31.7
hyperledger/sawtooth-core
validator/sawtooth_validator/networking/dispatch.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/networking/dispatch.py#L125-L141
def remove_send_last_message(self, connection): """Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages. """ if connection in self._send_last_message: del self._send_last_message[connection] LOGGER.debug("Removed send_last_message function " "for connection %s", connection) else: LOGGER.warning("Attempted to remove send_last_message " "function for connection %s, but no " "send_last_message function was registered", connection)
[ "def", "remove_send_last_message", "(", "self", ",", "connection", ")", ":", "if", "connection", "in", "self", ".", "_send_last_message", ":", "del", "self", ".", "_send_last_message", "[", "connection", "]", "LOGGER", ".", "debug", "(", "\"Removed send_last_messa...
Removes a send_last_message function previously registered with the Dispatcher. Args: connection (str): A locally unique identifier provided by the receiver of messages.
[ "Removes", "a", "send_last_message", "function", "previously", "registered", "with", "the", "Dispatcher", "." ]
python
train
44.117647
UCL-INGI/INGInious
inginious/frontend/pages/course_admin/submissions.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course_admin/submissions.py#L107-L111
def get_users(self, course): """ Returns a sorted list of users """ users = OrderedDict(sorted(list(self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course)).items()), key=lambda k: k[1][0] if k[1] is not None else "")) return users
[ "def", "get_users", "(", "self", ",", "course", ")", ":", "users", "=", "OrderedDict", "(", "sorted", "(", "list", "(", "self", ".", "user_manager", ".", "get_users_info", "(", "self", ".", "user_manager", ".", "get_course_registered_users", "(", "course", "...
Returns a sorted list of users
[ "Returns", "a", "sorted", "list", "of", "users" ]
python
train
58.8
NASA-AMMOS/AIT-Core
ait/core/gds.py
https://github.com/NASA-AMMOS/AIT-Core/blob/9d85bd9c738e7a6a6fbdff672bea708238b02a3a/ait/core/gds.py#L306-L314
def extant_file(file): """ 'Type' for argparse - checks that file exists but does not open. """ if not os.path.exists(file): # Argparse uses the ArgumentTypeError to give a rejection message like: # error: argument input: file does not exist raise argparse.ArgumentTypeError("{0} does not exist".format(file)) return file
[ "def", "extant_file", "(", "file", ")", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "file", ")", ":", "# Argparse uses the ArgumentTypeError to give a rejection message like:", "# error: argument input: file does not exist", "raise", "argparse", ".", "Argumen...
'Type' for argparse - checks that file exists but does not open.
[ "Type", "for", "argparse", "-", "checks", "that", "file", "exists", "but", "does", "not", "open", "." ]
python
train
39.666667
volafiled/python-volapi
volapi/volapi.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/volapi.py#L749-L768
def fileinfo(self, fid): """Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated.""" if not isinstance(fid, str): raise TypeError("Your file ID must be a string") try: info = self.conn.make_call_with_cb("getFileinfo", fid).get(timeout=5) if not info: warnings.warn( f"Your query for file with ID: '{fid}' failed.", RuntimeWarning ) elif fid in self.__files and not self.__files[fid].updated: self.__files[fid].fileupdate(info) except queue.Empty as ex: raise ValueError( "lain didn't produce a callback!\n" "Are you sure your query wasn't malformed?" ) from ex return info
[ "def", "fileinfo", "(", "self", ",", "fid", ")", ":", "if", "not", "isinstance", "(", "fid", ",", "str", ")", ":", "raise", "TypeError", "(", "\"Your file ID must be a string\"", ")", "try", ":", "info", "=", "self", ".", "conn", ".", "make_call_with_cb", ...
Ask lain about what he knows about given file. If the given file exists in the file dict, it will get updated.
[ "Ask", "lain", "about", "what", "he", "knows", "about", "given", "file", ".", "If", "the", "given", "file", "exists", "in", "the", "file", "dict", "it", "will", "get", "updated", "." ]
python
train
41.55
openvax/varcode
varcode/string_helpers.py
https://github.com/openvax/varcode/blob/981633db45ca2b31f76c06894a7360ea5d70a9b8/varcode/string_helpers.py#L18-L39
def trim_shared_prefix(ref, alt): """ Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix. """ n_ref = len(ref) n_alt = len(alt) n_min = min(n_ref, n_alt) i = 0 while i < n_min and ref[i] == alt[i]: i += 1 # guaranteed that ref and alt agree on all the characters # up to i'th position, so it doesn't matter which one we pull # the prefix out of prefix = ref[:i] ref_suffix = ref[i:] alt_suffix = alt[i:] return ref_suffix, alt_suffix, prefix
[ "def", "trim_shared_prefix", "(", "ref", ",", "alt", ")", ":", "n_ref", "=", "len", "(", "ref", ")", "n_alt", "=", "len", "(", "alt", ")", "n_min", "=", "min", "(", "n_ref", ",", "n_alt", ")", "i", "=", "0", "while", "i", "<", "n_min", "and", "...
Sometimes mutations are given with a shared prefix between the reference and alternate strings. Examples: C>CT (nucleotides) or GYFP>G (amino acids). This function trims the common prefix and returns the disjoint ref and alt strings, along with the shared prefix.
[ "Sometimes", "mutations", "are", "given", "with", "a", "shared", "prefix", "between", "the", "reference", "and", "alternate", "strings", ".", "Examples", ":", "C", ">", "CT", "(", "nucleotides", ")", "or", "GYFP", ">", "G", "(", "amino", "acids", ")", "....
python
train
32.454545
DMSC-Instrument-Data/lewis
src/lewis/core/control_server.py
https://github.com/DMSC-Instrument-Data/lewis/blob/931d96b8c761550a6a58f6e61e202690db04233a/src/lewis/core/control_server.py#L219-L233
def remove_object(self, name): """ Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed. """ if name not in self._object_map: raise RuntimeError('No object with name {} is registered.'.format(name)) for fn_name in list(self._function_map.keys()): if fn_name.startswith(name + '.') or fn_name.startswith(name + ':'): self._remove_function(fn_name) del self._object_map[name]
[ "def", "remove_object", "(", "self", ",", "name", ")", ":", "if", "name", "not", "in", "self", ".", "_object_map", ":", "raise", "RuntimeError", "(", "'No object with name {} is registered.'", ".", "format", "(", "name", ")", ")", "for", "fn_name", "in", "li...
Remove the object exposed under that name. If no object is registered under the supplied name, a RuntimeError is raised. :param name: Name of object to be removed.
[ "Remove", "the", "object", "exposed", "under", "that", "name", ".", "If", "no", "object", "is", "registered", "under", "the", "supplied", "name", "a", "RuntimeError", "is", "raised", "." ]
python
train
38.4
ArduPilot/MAVProxy
MAVProxy/modules/mavproxy_devop.py
https://github.com/ArduPilot/MAVProxy/blob/f50bdeff33064876f7dc8dc4683d278ff47f75d5/MAVProxy/modules/mavproxy_devop.py#L37-L56
def devop_read(self, args, bustype): '''read from device''' if len(args) < 5: print("Usage: devop read <spi|i2c> name bus address regstart count") return name = args[0] bus = int(args[1],base=0) address = int(args[2],base=0) reg = int(args[3],base=0) count = int(args[4],base=0) self.master.mav.device_op_read_send(self.target_system, self.target_component, self.request_id, bustype, bus, address, name, reg, count) self.request_id += 1
[ "def", "devop_read", "(", "self", ",", "args", ",", "bustype", ")", ":", "if", "len", "(", "args", ")", "<", "5", ":", "print", "(", "\"Usage: devop read <spi|i2c> name bus address regstart count\"", ")", "return", "name", "=", "args", "[", "0", "]", "bus", ...
read from device
[ "read", "from", "device" ]
python
train
43.25
CxAalto/gtfspy
gtfspy/gtfs.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/gtfs.py#L957-L1018
def get_events_by_tripI_and_dsut(self, trip_I, day_start_ut, start_ut=None, end_ut=None): """ Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time) """ # for checking input: assert day_start_ut <= start_ut assert day_start_ut <= end_ut assert start_ut <= end_ut events = [] # check that trip takes place on that day: if not self.tripI_takes_place_on_dsut(trip_I, day_start_ut): return events query = """SELECT stop_I, arr_time_ds+?, dep_time_ds+? FROM stop_times JOIN stops USING(stop_I) WHERE (trip_I = ?) """ params = [day_start_ut, day_start_ut, trip_I] if start_ut: query += "AND (dep_time_ds > ?-?)" params += [start_ut, day_start_ut] if end_ut: query += "AND (arr_time_ds < ?-?)" params += [end_ut, day_start_ut] query += "ORDER BY arr_time_ds" cur = self.conn.cursor() rows = cur.execute(query, params) stop_data = list(rows) for i in range(len(stop_data) - 1): event = { "from_stop": stop_data[i][0], "to_stop": stop_data[i + 1][0], "dep_time_ut": stop_data[i][2], "arr_time_ut": stop_data[i + 1][1] } events.append(event) return events
[ "def", "get_events_by_tripI_and_dsut", "(", "self", ",", "trip_I", ",", "day_start_ut", ",", "start_ut", "=", "None", ",", "end_ut", "=", "None", ")", ":", "# for checking input:", "assert", "day_start_ut", "<=", "start_ut", "assert", "day_start_ut", "<=", "end_ut...
Get trip data as a list of events (i.e. dicts). Parameters ---------- trip_I : int shorthand index of the trip. day_start_ut : int the start time of the day in unix time (seconds) start_ut : int, optional consider only events that start after this time If not specified, this filtering is not applied. end_ut : int, optional Consider only events that end before this time If not specified, this filtering is not applied. Returns ------- events: list of dicts each element contains the following data: from_stop: int (stop_I) to_stop: int (stop_I) dep_time_ut: int (in unix time) arr_time_ut: int (in unix time)
[ "Get", "trip", "data", "as", "a", "list", "of", "events", "(", "i", ".", "e", ".", "dicts", ")", "." ]
python
valid
35.774194
horazont/aioxmpp
aioxmpp/carbons/service.py
https://github.com/horazont/aioxmpp/blob/22a68e5e1d23f2a4dee470092adbd4672f9ef061/aioxmpp/carbons/service.py#L74-L90
def enable(self): """ Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send` """ yield from self._check_for_feature() iq = aioxmpp.IQ( type_=aioxmpp.IQType.SET, payload=carbons_xso.Enable() ) yield from self.client.send(iq)
[ "def", "enable", "(", "self", ")", ":", "yield", "from", "self", ".", "_check_for_feature", "(", ")", "iq", "=", "aioxmpp", ".", "IQ", "(", "type_", "=", "aioxmpp", ".", "IQType", ".", "SET", ",", "payload", "=", "carbons_xso", ".", "Enable", "(", ")...
Enable message carbons. :raises RuntimeError: if the server does not support message carbons. :raises aioxmpp.XMPPError: if the server responded with an error to the request. :raises: as specified in :meth:`aioxmpp.Client.send`
[ "Enable", "message", "carbons", "." ]
python
train
30.705882
Jajcus/pyxmpp2
pyxmpp2/transport.py
https://github.com/Jajcus/pyxmpp2/blob/14a40a3950910a9cd008b55f0d8905aa0186ce18/pyxmpp2/transport.py#L571-L586
def wait_for_readability(self): """ Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed) """ with self.lock: while True: if self._socket is None or self._eof: return False if self._state in ("connected", "closing"): return True if self._state == "tls-handshake" and \ self._tls_state == "want_read": return True self._state_cond.wait()
[ "def", "wait_for_readability", "(", "self", ")", ":", "with", "self", ".", "lock", ":", "while", "True", ":", "if", "self", ".", "_socket", "is", "None", "or", "self", ".", "_eof", ":", "return", "False", "if", "self", ".", "_state", "in", "(", "\"co...
Stop current thread until the channel is readable. :Return: `False` if it won't be readable (e.g. is closed)
[ "Stop", "current", "thread", "until", "the", "channel", "is", "readable", "." ]
python
valid
37.3125
mixmastamyk/console
console/windows.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/windows.py#L199-L203
def set_position(x, y, stream=STD_OUTPUT_HANDLE): ''' Sets current position of the cursor. ''' stream = kernel32.GetStdHandle(stream) value = x + (y << 16) kernel32.SetConsoleCursorPosition(stream, c_long(value))
[ "def", "set_position", "(", "x", ",", "y", ",", "stream", "=", "STD_OUTPUT_HANDLE", ")", ":", "stream", "=", "kernel32", ".", "GetStdHandle", "(", "stream", ")", "value", "=", "x", "+", "(", "y", "<<", "16", ")", "kernel32", ".", "SetConsoleCursorPositio...
Sets current position of the cursor.
[ "Sets", "current", "position", "of", "the", "cursor", "." ]
python
train
44.8
NetEaseGame/ATX
atx/drivers/webdriver.py
https://github.com/NetEaseGame/ATX/blob/f4415c57b45cb0730e08899cbc92a2af1c047ffb/atx/drivers/webdriver.py#L77-L85
def screenshot(self): """Take screenshot Return: PIL.Image """ url = urljoin(self.__device_url, "screenshot") r = httpdo('GET', url) raw_image = base64.b64decode(r.value) return Image.open(StringIO(raw_image))
[ "def", "screenshot", "(", "self", ")", ":", "url", "=", "urljoin", "(", "self", ".", "__device_url", ",", "\"screenshot\"", ")", "r", "=", "httpdo", "(", "'GET'", ",", "url", ")", "raw_image", "=", "base64", ".", "b64decode", "(", "r", ".", "value", ...
Take screenshot Return: PIL.Image
[ "Take", "screenshot", "Return", ":", "PIL", ".", "Image" ]
python
train
30.777778
StackStorm/pybind
pybind/nos/v6_0_2f/interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/nos/v6_0_2f/interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/__init__.py#L128-L149
def _set_valid_lifetime(self, v, load=False): """ Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """valid_lifetime must be of a type compatible with common-def:time-interval-sec""", 'defined-type': "common-def:time-interval-sec", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'0..4294967295']}), is_leaf=True, yang_name="valid-lifetime", rest_name="valid-lifetime", parent=self, choice=(u'ch-valid-type', u'ca-valid-lifetime'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configures valid lifetime', u'cli-drop-node-name': None, u'cli-incomplete-command': None}}, 
namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""", }) self.__valid_lifetime = t if hasattr(self, '_set'): self._set()
[ "def", "_set_valid_lifetime", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for valid_lifetime, mapped from YANG variable /interface/fortygigabitethernet/ipv6/ipv6_nd_ra/ipv6_intf_cmds/nd/prefix/lifetime/valid_lifetime (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_valid_lifetime is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_valid_lifetime() directly.
[ "Setter", "method", "for", "valid_lifetime", "mapped", "from", "YANG", "variable", "/", "interface", "/", "fortygigabitethernet", "/", "ipv6", "/", "ipv6_nd_ra", "/", "ipv6_intf_cmds", "/", "nd", "/", "prefix", "/", "lifetime", "/", "valid_lifetime", "(", "commo...
python
train
104.181818
maas/python-libmaas
maas/client/viscera/nodes.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/nodes.py#L140-L143
async def get_power_parameters(self): """Get the power paramters for this node.""" data = await self._handler.power_parameters(system_id=self.system_id) return data
[ "async", "def", "get_power_parameters", "(", "self", ")", ":", "data", "=", "await", "self", ".", "_handler", ".", "power_parameters", "(", "system_id", "=", "self", ".", "system_id", ")", "return", "data" ]
Get the power paramters for this node.
[ "Get", "the", "power", "paramters", "for", "this", "node", "." ]
python
train
46.25
UCL-INGI/INGInious
inginious/agent/__init__.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/agent/__init__.py#L147-L150
async def __handle_ping(self, _ : Ping): """ Handle a Ping message. Pong the backend """ self.__last_ping = time.time() await ZMQUtils.send(self.__backend_socket, Pong())
[ "async", "def", "__handle_ping", "(", "self", ",", "_", ":", "Ping", ")", ":", "self", ".", "__last_ping", "=", "time", ".", "time", "(", ")", "await", "ZMQUtils", ".", "send", "(", "self", ".", "__backend_socket", ",", "Pong", "(", ")", ")" ]
Handle a Ping message. Pong the backend
[ "Handle", "a", "Ping", "message", ".", "Pong", "the", "backend" ]
python
train
47.75
bitly/asyncmongo
asyncmongo/cursor.py
https://github.com/bitly/asyncmongo/blob/3da47c96d4592ec9e8b3ef5cf1b7d5b439ab3a5b/asyncmongo/cursor.py#L421-L430
def __query_options(self): """Get the query options string to use for this query.""" options = 0 if self.__tailable: options |= _QUERY_OPTIONS["tailable_cursor"] if self.__slave_okay or self.__pool._slave_okay: options |= _QUERY_OPTIONS["slave_okay"] if not self.__timeout: options |= _QUERY_OPTIONS["no_timeout"] return options
[ "def", "__query_options", "(", "self", ")", ":", "options", "=", "0", "if", "self", ".", "__tailable", ":", "options", "|=", "_QUERY_OPTIONS", "[", "\"tailable_cursor\"", "]", "if", "self", ".", "__slave_okay", "or", "self", ".", "__pool", ".", "_slave_okay"...
Get the query options string to use for this query.
[ "Get", "the", "query", "options", "string", "to", "use", "for", "this", "query", "." ]
python
train
40.3
sentinelsat/sentinelsat
sentinelsat/sentinel.py
https://github.com/sentinelsat/sentinelsat/blob/eacfd79ff4e7e939147db9dfdd393c67d64eecaa/sentinelsat/sentinel.py#L216-L247
def query_raw(self, query, order_by=None, limit=None, offset=0): """ Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value. """ warnings.warn( "query_raw() has been merged with query(). use query(raw=...) instead.", PendingDeprecationWarning ) return self.query(raw=query, order_by=order_by, limit=limit, offset=offset)
[ "def", "query_raw", "(", "self", ",", "query", ",", "order_by", "=", "None", ",", "limit", "=", "None", ",", "offset", "=", "0", ")", ":", "warnings", ".", "warn", "(", "\"query_raw() has been merged with query(). use query(raw=...) instead.\"", ",", "PendingDepre...
Do a full-text query on the OpenSearch API using the format specified in https://scihub.copernicus.eu/twiki/do/view/SciHubUserGuide/3FullTextSearch DEPRECATED: use :meth:`query(raw=...) <.query>` instead. This method will be removed in the next major release. Parameters ---------- query : str The query string. order_by: str, optional A comma-separated list of fields to order by (on server side). Prefix the field name by '+' or '-' to sort in ascending or descending order, respectively. Ascending order is used, if prefix is omitted. Example: "cloudcoverpercentage, -beginposition". limit: int, optional Maximum number of products returned. Defaults to no limit. offset: int, optional The number of results to skip. Defaults to 0. Returns ------- dict[string, dict] Products returned by the query as a dictionary with the product ID as the key and the product's attributes (a dictionary) as the value.
[ "Do", "a", "full", "-", "text", "query", "on", "the", "OpenSearch", "API", "using", "the", "format", "specified", "in", "https", ":", "//", "scihub", ".", "copernicus", ".", "eu", "/", "twiki", "/", "do", "/", "view", "/", "SciHubUserGuide", "/", "3Ful...
python
train
44.0625
zarr-developers/zarr
zarr/core.py
https://github.com/zarr-developers/zarr/blob/fb8e6d5ea6bc26e451e5cf0eaaee36977556d5b5/zarr/core.py#L2102-L2242
def view(self, shape=None, chunks=None, dtype=None, fill_value=None, filters=None, read_only=None, synchronizer=None): """Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... 
astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... 
print(e) operation not permitted for views """ store = self._store chunk_store = self._chunk_store path = self._path if read_only is None: read_only = self._read_only if synchronizer is None: synchronizer = self._synchronizer a = Array(store=store, path=path, chunk_store=chunk_store, read_only=read_only, synchronizer=synchronizer, cache_metadata=True) a._is_view = True # allow override of some properties if dtype is None: dtype = self._dtype else: dtype = np.dtype(dtype) a._dtype = dtype if shape is None: shape = self._shape else: shape = normalize_shape(shape) a._shape = shape if chunks is not None: chunks = normalize_chunks(chunks, shape, dtype.itemsize) a._chunks = chunks if fill_value is not None: a._fill_value = fill_value if filters is not None: a._filters = filters return a
[ "def", "view", "(", "self", ",", "shape", "=", "None", ",", "chunks", "=", "None", ",", "dtype", "=", "None", ",", "fill_value", "=", "None", ",", "filters", "=", "None", ",", "read_only", "=", "None", ",", "synchronizer", "=", "None", ")", ":", "s...
Return an array sharing the same data. Parameters ---------- shape : int or tuple of ints Array shape. chunks : int or tuple of ints, optional Chunk shape. dtype : string or dtype, optional NumPy dtype. fill_value : object Default value to use for uninitialized portions of the array. filters : sequence, optional Sequence of filters to use to encode chunk data prior to compression. read_only : bool, optional True if array should be protected against modification. synchronizer : object, optional Array synchronizer. Notes ----- WARNING: This is an experimental feature and should be used with care. There are plenty of ways to generate errors and/or cause data corruption. Examples -------- Bypass filters: >>> import zarr >>> import numpy as np >>> np.random.seed(42) >>> labels = ['female', 'male'] >>> data = np.random.choice(labels, size=10000) >>> filters = [zarr.Categorize(labels=labels, ... dtype=data.dtype, ... astype='u1')] >>> a = zarr.array(data, chunks=1000, filters=filters) >>> a[:] array(['female', 'male', 'female', ..., 'male', 'male', 'female'], dtype='<U6') >>> v = a.view(dtype='u1', filters=[]) >>> v.is_view True >>> v[:] array([1, 2, 1, ..., 2, 2, 1], dtype=uint8) Views can be used to modify data: >>> x = v[:] >>> x.sort() >>> v[:] = x >>> v[:] array([1, 1, 1, ..., 2, 2, 2], dtype=uint8) >>> a[:] array(['female', 'female', 'female', ..., 'male', 'male', 'male'], dtype='<U6') View as a different dtype with the same item size: >>> data = np.random.randint(0, 2, size=10000, dtype='u1') >>> a = zarr.array(data, chunks=1000) >>> a[:] array([0, 0, 1, ..., 1, 0, 0], dtype=uint8) >>> v = a.view(dtype=bool) >>> v[:] array([False, False, True, ..., True, False, False]) >>> np.all(a[:].view(dtype=bool) == v[:]) True An array can be viewed with a dtype with a different item size, however some care is needed to adjust the shape and chunk shape so that chunk data is interpreted correctly: >>> data = np.arange(10000, dtype='u2') >>> a = zarr.array(data, chunks=1000) >>> 
a[:10] array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], dtype=uint16) >>> v = a.view(dtype='u1', shape=20000, chunks=2000) >>> v[:10] array([0, 0, 1, 0, 2, 0, 3, 0, 4, 0], dtype=uint8) >>> np.all(a[:].view('u1') == v[:]) True Change fill value for uninitialized chunks: >>> a = zarr.full(10000, chunks=1000, fill_value=-1, dtype='i1') >>> a[:] array([-1, -1, -1, ..., -1, -1, -1], dtype=int8) >>> v = a.view(fill_value=42) >>> v[:] array([42, 42, 42, ..., 42, 42, 42], dtype=int8) Note that resizing or appending to views is not permitted: >>> a = zarr.empty(10000) >>> v = a.view() >>> try: ... v.resize(20000) ... except PermissionError as e: ... print(e) operation not permitted for views
[ "Return", "an", "array", "sharing", "the", "same", "data", "." ]
python
train
34.028369
rsalmei/clearly
clearly/server.py
https://github.com/rsalmei/clearly/blob/fd784843d13f0fed28fc192565bec3668f1363f4/clearly/server.py#L148-L154
def find_task(self, request, context): """Finds one specific task.""" _log_request(request, context) task = self.listener.memory.tasks.get(request.task_uuid) if not task: return clearly_pb2.TaskMessage() return ClearlyServer._event_to_pb(task)[1]
[ "def", "find_task", "(", "self", ",", "request", ",", "context", ")", ":", "_log_request", "(", "request", ",", "context", ")", "task", "=", "self", ".", "listener", ".", "memory", ".", "tasks", ".", "get", "(", "request", ".", "task_uuid", ")", "if", ...
Finds one specific task.
[ "Finds", "one", "specific", "task", "." ]
python
train
41.714286
samuraisam/pyapns
pyapns/client.py
https://github.com/samuraisam/pyapns/blob/78c1875f28f8af51c7dd7f60d4436a8b282b0394/pyapns/client.py#L44-L68
def reprovision_and_retry(func): """ Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error. """ @functools.wraps(func) def wrapper(*a, **kw): errback = kw.get('errback', None) if errback is None: def errback(e): raise e def errback_wrapper(e): if isinstance(e, UnknownAppID) and 'INITIAL' in OPTIONS: try: for initial in OPTIONS['INITIAL']: provision(*initial) # retry provisioning the initial setup func(*a, **kw) # and try the function once more except Exception, new_exc: errback(new_exc) # throwing the new exception else: errback(e) # not an instance of UnknownAppID - nothing we can do here kw['errback'] = errback_wrapper return func(*a, **kw) return wrapper
[ "def", "reprovision_and_retry", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "wrapper", "(", "*", "a", ",", "*", "*", "kw", ")", ":", "errback", "=", "kw", ".", "get", "(", "'errback'", ",", "None", ")", "if", "...
Wraps the `errback` callback of the API functions, automatically trying to re-provision if the app ID can not be found during the operation. If that's unsuccessful, it will raise the UnknownAppID error.
[ "Wraps", "the", "errback", "callback", "of", "the", "API", "functions", "automatically", "trying", "to", "re", "-", "provision", "if", "the", "app", "ID", "can", "not", "be", "found", "during", "the", "operation", ".", "If", "that", "s", "unsuccessful", "i...
python
train
36.68
JdeRobot/base
src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ualberta.py
https://github.com/JdeRobot/base/blob/303b18992785b2fe802212f2d758a60873007f1f/src/drivers/MAVLinkServer/MAVProxy/pymavlink/dialects/v20/ualberta.py#L7409-L7421
def radio_calibration_send(self, aileron, elevator, rudder, gyro, pitch, throttle, force_mavlink1=False): ''' Complete set of calibration parameters for the radio aileron : Aileron setpoints: left, center, right (uint16_t) elevator : Elevator setpoints: nose down, center, nose up (uint16_t) rudder : Rudder setpoints: nose left, center, nose right (uint16_t) gyro : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t) pitch : Pitch curve setpoints (every 25%) (uint16_t) throttle : Throttle curve setpoints (every 25%) (uint16_t) ''' return self.send(self.radio_calibration_encode(aileron, elevator, rudder, gyro, pitch, throttle), force_mavlink1=force_mavlink1)
[ "def", "radio_calibration_send", "(", "self", ",", "aileron", ",", "elevator", ",", "rudder", ",", "gyro", ",", "pitch", ",", "throttle", ",", "force_mavlink1", "=", "False", ")", ":", "return", "self", ".", "send", "(", "self", ".", "radio_calibration_encod...
Complete set of calibration parameters for the radio aileron : Aileron setpoints: left, center, right (uint16_t) elevator : Elevator setpoints: nose down, center, nose up (uint16_t) rudder : Rudder setpoints: nose left, center, nose right (uint16_t) gyro : Tail gyro mode/gain setpoints: heading hold, rate mode (uint16_t) pitch : Pitch curve setpoints (every 25%) (uint16_t) throttle : Throttle curve setpoints (every 25%) (uint16_t)
[ "Complete", "set", "of", "calibration", "parameters", "for", "the", "radio" ]
python
train
72.230769
glyph/automat
automat/_core.py
https://github.com/glyph/automat/blob/80c6eb925eeef120443f4f9c81398bea629884b8/automat/_core.py#L114-L126
def states(self): """ All valid states; "Q" in the mathematical description of a state machine. """ return frozenset( chain.from_iterable( (inState, outState) for (inState, inputSymbol, outState, outputSymbol) in self._transitions ) )
[ "def", "states", "(", "self", ")", ":", "return", "frozenset", "(", "chain", ".", "from_iterable", "(", "(", "inState", ",", "outState", ")", "for", "(", "inState", ",", "inputSymbol", ",", "outState", ",", "outputSymbol", ")", "in", "self", ".", "_trans...
All valid states; "Q" in the mathematical description of a state machine.
[ "All", "valid", "states", ";", "Q", "in", "the", "mathematical", "description", "of", "a", "state", "machine", "." ]
python
train
27.538462
eyurtsev/FlowCytometryTools
fabfile.py
https://github.com/eyurtsev/FlowCytometryTools/blob/4355632508b875273d68c7e2972c17668bcf7b40/fabfile.py#L95-L108
def pypi_register(server='pypitest'): """Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315 """ base_command = 'python setup.py register' if server == 'pypitest': command = base_command + ' -r https://testpypi.python.org/pypi' else: command = base_command _execute_setup_command(command)
[ "def", "pypi_register", "(", "server", "=", "'pypitest'", ")", ":", "base_command", "=", "'python setup.py register'", "if", "server", "==", "'pypitest'", ":", "command", "=", "base_command", "+", "' -r https://testpypi.python.org/pypi'", "else", ":", "command", "=", ...
Register and prep user for PyPi upload. .. note:: May need to weak ~/.pypirc file per issue: http://stackoverflow.com/questions/1569315
[ "Register", "and", "prep", "user", "for", "PyPi", "upload", "." ]
python
train
29.571429
chrisvoncsefalvay/diffiehellman
diffiehellman/decorators.py
https://github.com/chrisvoncsefalvay/diffiehellman/blob/06e656ea918c6c069d931a4e9443cb4b57d0a0cb/diffiehellman/decorators.py#L32-L44
def requires_private_key(func): """ Decorator for functions that require the private key to be defined. """ def func_wrapper(self, *args, **kwargs): if hasattr(self, "_DiffieHellman__private_key"): func(self, *args, **kwargs) else: self.generate_private_key() func(self, *args, **kwargs) return func_wrapper
[ "def", "requires_private_key", "(", "func", ")", ":", "def", "func_wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "hasattr", "(", "self", ",", "\"_DiffieHellman__private_key\"", ")", ":", "func", "(", "self", ",", "*", ...
Decorator for functions that require the private key to be defined.
[ "Decorator", "for", "functions", "that", "require", "the", "private", "key", "to", "be", "defined", "." ]
python
train
28.384615
urinieto/msaf
msaf/input_output.py
https://github.com/urinieto/msaf/blob/9dbb57d77a1310465a65cc40f1641d083ca74385/msaf/input_output.py#L53-L101
def read_estimations(est_file, boundaries_id, labels_id=None, **params): """Reads the estimations (boundaries and/or labels) from a jams file containing the estimations of an algorithm. Parameters ---------- est_file : str Path to the estimated file (JAMS file). boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- boundaries : np.array((N,2)) Array containing the estimated boundaries in intervals. labels : np.array(N) Array containing the estimated labels. Empty array if labels_id is None. """ # Open file and read jams jam = jams.load(est_file) # Find correct estimation est = find_estimation(jam, boundaries_id, labels_id, params) if est is None: raise NoEstimationsError("No estimations for file: %s" % est_file) # Get data values all_boundaries, all_labels = est.to_interval_values() if params["hier"]: hier_bounds = defaultdict(list) hier_labels = defaultdict(list) for bounds, labels in zip(all_boundaries, all_labels): level = labels["level"] hier_bounds[level].append(bounds) hier_labels[level].append(labels["label"]) # Order all_boundaries = [] all_labels = [] for key in sorted(list(hier_bounds.keys())): all_boundaries.append(np.asarray(hier_bounds[key])) all_labels.append(np.asarray(hier_labels[key])) return all_boundaries, all_labels
[ "def", "read_estimations", "(", "est_file", ",", "boundaries_id", ",", "labels_id", "=", "None", ",", "*", "*", "params", ")", ":", "# Open file and read jams", "jam", "=", "jams", ".", "load", "(", "est_file", ")", "# Find correct estimation", "est", "=", "fi...
Reads the estimations (boundaries and/or labels) from a jams file containing the estimations of an algorithm. Parameters ---------- est_file : str Path to the estimated file (JAMS file). boundaries_id : str Identifier of the algorithm used to compute the boundaries. labels_id : str Identifier of the algorithm used to compute the labels. params : dict Additional search parameters. E.g. {"feature" : "pcp"}. Returns ------- boundaries : np.array((N,2)) Array containing the estimated boundaries in intervals. labels : np.array(N) Array containing the estimated labels. Empty array if labels_id is None.
[ "Reads", "the", "estimations", "(", "boundaries", "and", "/", "or", "labels", ")", "from", "a", "jams", "file", "containing", "the", "estimations", "of", "an", "algorithm", "." ]
python
test
33.979592
edibledinos/pwnypack
pwnypack/main.py
https://github.com/edibledinos/pwnypack/blob/e0a5a8e6ef3f4f1f7e1b91ee379711f4a49cb0e6/pwnypack/main.py#L36-L46
def binary_value_or_stdin(value): """ Return fsencoded value or read raw data from stdin if value is None. """ if value is None: reader = io.open(sys.stdin.fileno(), mode='rb', closefd=False) return reader.read() elif six.PY3: return os.fsencode(value) else: return value
[ "def", "binary_value_or_stdin", "(", "value", ")", ":", "if", "value", "is", "None", ":", "reader", "=", "io", ".", "open", "(", "sys", ".", "stdin", ".", "fileno", "(", ")", ",", "mode", "=", "'rb'", ",", "closefd", "=", "False", ")", "return", "r...
Return fsencoded value or read raw data from stdin if value is None.
[ "Return", "fsencoded", "value", "or", "read", "raw", "data", "from", "stdin", "if", "value", "is", "None", "." ]
python
train
28.818182
ParthKolekar/parthsql
parthsql/parthsql.py
https://github.com/ParthKolekar/parthsql/blob/98b69448aeaca1331c9db29bc85e731702a6b0d9/parthsql/parthsql.py#L136-L143
def get_column_list_prefixed(self): """ Returns a list of columns """ return map( lambda x: ".".join([self.name, x]), self.columns )
[ "def", "get_column_list_prefixed", "(", "self", ")", ":", "return", "map", "(", "lambda", "x", ":", "\".\"", ".", "join", "(", "[", "self", ".", "name", ",", "x", "]", ")", ",", "self", ".", "columns", ")" ]
Returns a list of columns
[ "Returns", "a", "list", "of", "columns" ]
python
train
24.125
saltstack/salt
salt/runners/vault.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/runners/vault.py#L166-L189
def _get_policies(minion_id, config): ''' Get the policies that should be applied to a token for minion_id ''' _, grains, _ = salt.utils.minions.get_minion_data(minion_id, __opts__) policy_patterns = config.get( 'policies', ['saltstack/minion/{minion}', 'saltstack/minions'] ) mappings = {'minion': minion_id, 'grains': grains or {}} policies = [] for pattern in policy_patterns: try: for expanded_pattern in _expand_pattern_lists(pattern, **mappings): policies.append( expanded_pattern.format(**mappings) .lower() # Vault requirement ) except KeyError: log.warning('Could not resolve policy pattern %s', pattern) log.debug('%s policies: %s', minion_id, policies) return policies
[ "def", "_get_policies", "(", "minion_id", ",", "config", ")", ":", "_", ",", "grains", ",", "_", "=", "salt", ".", "utils", ".", "minions", ".", "get_minion_data", "(", "minion_id", ",", "__opts__", ")", "policy_patterns", "=", "config", ".", "get", "(",...
Get the policies that should be applied to a token for minion_id
[ "Get", "the", "policies", "that", "should", "be", "applied", "to", "a", "token", "for", "minion_id" ]
python
train
40.166667
autokey/autokey
lib/autokey/qtapp.py
https://github.com/autokey/autokey/blob/35decb72f286ce68cd2a1f09ace8891a520b58d1/lib/autokey/qtapp.py#L305-L312
def show_configure(self): """ Show the configuration window, or deiconify (un-minimise) it if it's already open. """ logging.info("Displaying configuration window") self.configWindow.show() self.configWindow.showNormal() self.configWindow.activateWindow()
[ "def", "show_configure", "(", "self", ")", ":", "logging", ".", "info", "(", "\"Displaying configuration window\"", ")", "self", ".", "configWindow", ".", "show", "(", ")", "self", ".", "configWindow", ".", "showNormal", "(", ")", "self", ".", "configWindow", ...
Show the configuration window, or deiconify (un-minimise) it if it's already open.
[ "Show", "the", "configuration", "window", "or", "deiconify", "(", "un", "-", "minimise", ")", "it", "if", "it", "s", "already", "open", "." ]
python
train
38
PmagPy/PmagPy
pmagpy/pmag.py
https://github.com/PmagPy/PmagPy/blob/c7984f8809bf40fe112e53dcc311a33293b62d0b/pmagpy/pmag.py#L10888-L10971
def chart_maker(Int, Top, start=100, outfile='chart.txt'): """ Makes a chart for performing IZZI experiments. Print out the file and tape it to the oven. This chart will help keep track of the different steps. Z : performed in zero field - enter the temperature XXX.0 in the sio formatted measurement file created by the LabView program I : performed in the lab field written at the top of the form P : a pTRM step - performed at the temperature and in the lab field. Parameters __________ Int : list of intervals [e.g., 50,10,5] Top : list of upper bounds for each interval [e.g., 500, 550, 600] start : first temperature step, default is 100 outfile : name of output file, default is 'chart.txt' Output _________ creates a file with: file: write down the name of the measurement file field: write down the lab field for the infield steps (in uT) the type of step (Z: zerofield, I: infield, P: pTRM step temperature of the step and code for SIO-like treatment steps XXX.0 [zero field] XXX.1 [in field] XXX.2 [pTRM check] - done in a lab field date : date the step was performed run # : an optional run number zones I-III : field in the zones in the oven start : time the run was started sp : time the setpoint was reached cool : time cooling started """ low, k, iz = start, 0, 0 Tzero = [] f = open('chart.txt', 'w') vline = '\t%s\n' % ( ' | | | | | | | |') hline = '______________________________________________________________________________\n' f.write('file:_________________ field:___________uT\n\n\n') f.write('%s\n' % ( ' date | run# | zone I | zone II | zone III | start | sp | cool|')) f.write(hline) f.write('\t%s' % (' 0.0')) f.write(vline) f.write(hline) for k in range(len(Top)): for t in range(low, Top[k]+Int[k], Int[k]): if iz == 0: Tzero.append(t) # zero field first step f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) if len(Tzero) > 1: f.write('%s \t %s' % ('P', str(Tzero[-2])+'.'+str(2))) f.write(vline) f.write(hline) iz = 1 # infield after 
zero field first f.write('%s \t %s' % ('I', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) # f.write('%s \t %s'%('T',str(t)+'.'+str(3))) # print second zero field (tail check) # f.write(vline) # f.write(hline) elif iz == 1: # infield first step f.write('%s \t %s' % ('I', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) iz = 0 # zero field step (after infield) f.write('%s \t %s' % ('Z', str(t)+'.'+str(iz))) f.write(vline) f.write(hline) try: low = Top[k]+Int[k+1] # increment to next temp step except: f.close() print("output stored in: chart.txt")
[ "def", "chart_maker", "(", "Int", ",", "Top", ",", "start", "=", "100", ",", "outfile", "=", "'chart.txt'", ")", ":", "low", ",", "k", ",", "iz", "=", "start", ",", "0", ",", "0", "Tzero", "=", "[", "]", "f", "=", "open", "(", "'chart.txt'", ",...
Makes a chart for performing IZZI experiments. Print out the file and tape it to the oven. This chart will help keep track of the different steps. Z : performed in zero field - enter the temperature XXX.0 in the sio formatted measurement file created by the LabView program I : performed in the lab field written at the top of the form P : a pTRM step - performed at the temperature and in the lab field. Parameters __________ Int : list of intervals [e.g., 50,10,5] Top : list of upper bounds for each interval [e.g., 500, 550, 600] start : first temperature step, default is 100 outfile : name of output file, default is 'chart.txt' Output _________ creates a file with: file: write down the name of the measurement file field: write down the lab field for the infield steps (in uT) the type of step (Z: zerofield, I: infield, P: pTRM step temperature of the step and code for SIO-like treatment steps XXX.0 [zero field] XXX.1 [in field] XXX.2 [pTRM check] - done in a lab field date : date the step was performed run # : an optional run number zones I-III : field in the zones in the oven start : time the run was started sp : time the setpoint was reached cool : time cooling started
[ "Makes", "a", "chart", "for", "performing", "IZZI", "experiments", ".", "Print", "out", "the", "file", "and", "tape", "it", "to", "the", "oven", ".", "This", "chart", "will", "help", "keep", "track", "of", "the", "different", "steps", ".", "Z", ":", "p...
python
train
39.345238
shichao-an/115wangpan
u115/api.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L490-L507
def download(self, obj, path=None, show_progress=True, resume=True, auto_retry=True, proapi=False): """ Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API """ url = obj.get_download_url(proapi) download(url, path=path, session=self.http.session, show_progress=show_progress, resume=resume, auto_retry=auto_retry)
[ "def", "download", "(", "self", ",", "obj", ",", "path", "=", "None", ",", "show_progress", "=", "True", ",", "resume", "=", "True", ",", "auto_retry", "=", "True", ",", "proapi", "=", "False", ")", ":", "url", "=", "obj", ".", "get_download_url", "(...
Download a file :param obj: :class:`.File` object :param str path: local path :param bool show_progress: whether to show download progress :param bool resume: whether to resume on unfinished downloads identified by filename :param bool auto_retry: whether to retry automatically upon closed transfer until the file's download is finished :param bool proapi: whether to use pro API
[ "Download", "a", "file" ]
python
train
43.833333
agile-geoscience/striplog
striplog/utils.py
https://github.com/agile-geoscience/striplog/blob/8033b673a151f96c29802b43763e863519a3124c/striplog/utils.py#L97-L120
def convert_field(self, value, conversion): """ Define some extra field conversion functions. """ try: # If the normal behaviour works, do it. s = super(CustomFormatter, self) return s.convert_field(value, conversion) except ValueError: funcs = {'s': str, # Default. 'r': repr, # Default. 'a': ascii, # Default. 'u': str.upper, 'l': str.lower, 'c': str.capitalize, 't': str.title, 'm': np.mean, 'µ': np.mean, 'v': np.var, 'd': np.std, '+': np.sum, '∑': np.sum, 'x': np.product, } return funcs.get(conversion)(value)
[ "def", "convert_field", "(", "self", ",", "value", ",", "conversion", ")", ":", "try", ":", "# If the normal behaviour works, do it.", "s", "=", "super", "(", "CustomFormatter", ",", "self", ")", "return", "s", ".", "convert_field", "(", "value", ",", "convers...
Define some extra field conversion functions.
[ "Define", "some", "extra", "field", "conversion", "functions", "." ]
python
test
36.708333
junzis/pyModeS
pyModeS/extra/aero.py
https://github.com/junzis/pyModeS/blob/8cd5655a04b08171a9ad5f1ffd232b7e0178ea53/pyModeS/extra/aero.py#L150-L154
def tas2eas(Vtas, H): """True Airspeed to Equivalent Airspeed""" rho = density(H) Veas = Vtas * np.sqrt(rho/rho0) return Veas
[ "def", "tas2eas", "(", "Vtas", ",", "H", ")", ":", "rho", "=", "density", "(", "H", ")", "Veas", "=", "Vtas", "*", "np", ".", "sqrt", "(", "rho", "/", "rho0", ")", "return", "Veas" ]
True Airspeed to Equivalent Airspeed
[ "True", "Airspeed", "to", "Equivalent", "Airspeed" ]
python
train
27.4
rodluger/everest
everest/missions/k2/pbs.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/missions/k2/pbs.py#L78-L111
def _Download(campaign, subcampaign): ''' Download all stars from a given campaign. This is called from ``missions/k2/download.pbs`` ''' # Are we doing a subcampaign? if subcampaign != -1: campaign = campaign + 0.1 * subcampaign # Get all star IDs for this campaign stars = [s[0] for s in GetK2Campaign(campaign)] nstars = len(stars) # Download the TPF data for each one for i, EPIC in enumerate(stars): print("Downloading data for EPIC %d (%d/%d)..." % (EPIC, i + 1, nstars)) if not os.path.exists(os.path.join(EVEREST_DAT, 'k2', 'c%02d' % int(campaign), ('%09d' % EPIC)[:4] + '00000', ('%09d' % EPIC)[4:], 'data.npz')): try: GetData(EPIC, season=campaign, download_only=True) except KeyboardInterrupt: sys.exit() except: # Some targets could be corrupted... print("ERROR downloading EPIC %d." % EPIC) exctype, value, tb = sys.exc_info() for line in traceback.format_exception_only(exctype, value): ln = line.replace('\n', '') print(ln) continue
[ "def", "_Download", "(", "campaign", ",", "subcampaign", ")", ":", "# Are we doing a subcampaign?", "if", "subcampaign", "!=", "-", "1", ":", "campaign", "=", "campaign", "+", "0.1", "*", "subcampaign", "# Get all star IDs for this campaign", "stars", "=", "[", "s...
Download all stars from a given campaign. This is called from ``missions/k2/download.pbs``
[ "Download", "all", "stars", "from", "a", "given", "campaign", ".", "This", "is", "called", "from", "missions", "/", "k2", "/", "download", ".", "pbs" ]
python
train
39.911765
wndhydrnt/python-oauth2
oauth2/store/memcache.py
https://github.com/wndhydrnt/python-oauth2/blob/abe3bf5f27bda2ff737cab387b040e2e6e85c2e2/oauth2/store/memcache.py#L98-L105
def delete_refresh_token(self, refresh_token): """ Deletes a refresh token after use :param refresh_token: The refresh token to delete. """ access_token = self.fetch_by_refresh_token(refresh_token) self.mc.delete(self._generate_cache_key(access_token.token)) self.mc.delete(self._generate_cache_key(refresh_token))
[ "def", "delete_refresh_token", "(", "self", ",", "refresh_token", ")", ":", "access_token", "=", "self", ".", "fetch_by_refresh_token", "(", "refresh_token", ")", "self", ".", "mc", ".", "delete", "(", "self", ".", "_generate_cache_key", "(", "access_token", "."...
Deletes a refresh token after use :param refresh_token: The refresh token to delete.
[ "Deletes", "a", "refresh", "token", "after", "use", ":", "param", "refresh_token", ":", "The", "refresh", "token", "to", "delete", "." ]
python
train
45.375
lesscpy/lesscpy
lesscpy/lessc/scope.py
https://github.com/lesscpy/lesscpy/blob/51e392fb4a3cd4ccfb6175e0e42ce7d2f6b78126/lesscpy/lessc/scope.py#L109-L120
def mixins(self, name): """ Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False """ m = self._smixins(name) if m: return m return self._smixins(name.replace('?>?', ' '))
[ "def", "mixins", "(", "self", ",", "name", ")", ":", "m", "=", "self", ".", "_smixins", "(", "name", ")", "if", "m", ":", "return", "m", "return", "self", ".", "_smixins", "(", "name", ".", "replace", "(", "'?>?'", ",", "' '", ")", ")" ]
Search mixins for name. Allow '>' to be ignored. '.a .b()' == '.a > .b()' Args: name (string): Search term Returns: Mixin object list OR False
[ "Search", "mixins", "for", "name", ".", "Allow", ">", "to", "be", "ignored", ".", ".", "a", ".", "b", "()", "==", ".", "a", ">", ".", "b", "()", "Args", ":", "name", "(", "string", ")", ":", "Search", "term", "Returns", ":", "Mixin", "object", ...
python
valid
29.083333
inasafe/inasafe
safe/utilities/settings.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/utilities/settings.py#L61-L97
def general_setting(key, default=None, expected_type=None, qsettings=None): """Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value """ if qsettings is None: qsettings = QSettings() try: if isinstance(expected_type, type): return qsettings.value(key, default, type=expected_type) else: return qsettings.value(key, default) except TypeError as e: LOGGER.debug('exception %s' % e) LOGGER.debug('%s %s %s' % (key, default, expected_type)) return qsettings.value(key, default)
[ "def", "general_setting", "(", "key", ",", "default", "=", "None", ",", "expected_type", "=", "None", ",", "qsettings", "=", "None", ")", ":", "if", "qsettings", "is", "None", ":", "qsettings", "=", "QSettings", "(", ")", "try", ":", "if", "isinstance", ...
Helper function to get a value from settings. :param key: Unique key for setting. :type key: basestring :param default: The default value in case of the key is not found or there is an error. :type default: basestring, None, boolean, int, float :param expected_type: The type of object expected. :type expected_type: type :param qsettings: A custom QSettings to use. If it's not defined, it will use the default one. :type qsettings: qgis.PyQt.QtCore.QSettings :returns: The value of the key in the setting. :rtype: object Note: The API for QSettings to get a value is different for PyQt and Qt C++. In PyQt we can specify the expected type. See: http://pyqt.sourceforge.net/Docs/PyQt4/qsettings.html#value
[ "Helper", "function", "to", "get", "a", "value", "from", "settings", "." ]
python
train
34.081081
cfobel/pygtk3-helpers
pygtk3_helpers/delegates.py
https://github.com/cfobel/pygtk3-helpers/blob/ae793cb34a5c1bbe40cc83bb8a6084f0eeed2519/pygtk3_helpers/delegates.py#L103-L112
def add_slave(self, slave, container_name="widget"): """Add a slave delegate """ cont = getattr(self, container_name, None) if cont is None: raise AttributeError( 'Container name must be a member of the delegate') cont.add(slave.widget) self.slaves.append(slave) return slave
[ "def", "add_slave", "(", "self", ",", "slave", ",", "container_name", "=", "\"widget\"", ")", ":", "cont", "=", "getattr", "(", "self", ",", "container_name", ",", "None", ")", "if", "cont", "is", "None", ":", "raise", "AttributeError", "(", "'Container na...
Add a slave delegate
[ "Add", "a", "slave", "delegate" ]
python
train
35
Microsoft/nni
examples/trials/sklearn/regression/main.py
https://github.com/Microsoft/nni/blob/c7cc8db32da8d2ec77a382a55089f4e17247ce41/examples/trials/sklearn/regression/main.py#L33-L46
def load_data(): '''Load dataset, use boston dataset''' boston = load_boston() X_train, X_test, y_train, y_test = train_test_split(boston.data, boston.target, random_state=99, test_size=0.25) #normalize data ss_X = StandardScaler() ss_y = StandardScaler() X_train = ss_X.fit_transform(X_train) X_test = ss_X.transform(X_test) y_train = ss_y.fit_transform(y_train[:, None])[:,0] y_test = ss_y.transform(y_test[:, None])[:,0] return X_train, X_test, y_train, y_test
[ "def", "load_data", "(", ")", ":", "boston", "=", "load_boston", "(", ")", "X_train", ",", "X_test", ",", "y_train", ",", "y_test", "=", "train_test_split", "(", "boston", ".", "data", ",", "boston", ".", "target", ",", "random_state", "=", "99", ",", ...
Load dataset, use boston dataset
[ "Load", "dataset", "use", "boston", "dataset" ]
python
train
35.428571
underscorephil/dayonelib
dayonelib/__init__.py
https://github.com/underscorephil/dayonelib/blob/4df134f601abcb033ec04cf7596f25ee25d44661/dayonelib/__init__.py#L68-L71
def time(self, t): """Convert any timestamp into a datetime and save as _time""" _time = arrow.get(t).format('YYYY-MM-DDTHH:mm:ss') self._time = datetime.datetime.strptime(_time, '%Y-%m-%dT%H:%M:%S')
[ "def", "time", "(", "self", ",", "t", ")", ":", "_time", "=", "arrow", ".", "get", "(", "t", ")", ".", "format", "(", "'YYYY-MM-DDTHH:mm:ss'", ")", "self", ".", "_time", "=", "datetime", ".", "datetime", ".", "strptime", "(", "_time", ",", "'%Y-%m-%d...
Convert any timestamp into a datetime and save as _time
[ "Convert", "any", "timestamp", "into", "a", "datetime", "and", "save", "as", "_time" ]
python
valid
55
erikrose/parsimonious
parsimonious/expressions.py
https://github.com/erikrose/parsimonious/blob/12263be5ceca89344905c2c3eb9ac5a603e976e1/parsimonious/expressions.py#L278-L285
def _uncached_match(self, text, pos, cache, error): """Return length of match, ``None`` if no match.""" m = self.re.match(text, pos) if m is not None: span = m.span() node = RegexNode(self, text, pos, pos + span[1] - span[0]) node.match = m # TODO: A terrible idea for cache size? return node
[ "def", "_uncached_match", "(", "self", ",", "text", ",", "pos", ",", "cache", ",", "error", ")", ":", "m", "=", "self", ".", "re", ".", "match", "(", "text", ",", "pos", ")", "if", "m", "is", "not", "None", ":", "span", "=", "m", ".", "span", ...
Return length of match, ``None`` if no match.
[ "Return", "length", "of", "match", "None", "if", "no", "match", "." ]
python
train
44.75
klahnakoski/pyLibrary
mo_math/vendor/strangman/stats.py
https://github.com/klahnakoski/pyLibrary/blob/fa2dcbc48fda8d26999baef400e9a98149e0b982/mo_math/vendor/strangman/stats.py#L358-L374
def moment(inlist, moment=1): """ Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r) """ if moment == 1: return 0.0 else: mn = mean(inlist) n = len(inlist) s = 0 for x in inlist: s = s + (x - mn) ** moment return s / float(n)
[ "def", "moment", "(", "inlist", ",", "moment", "=", "1", ")", ":", "if", "moment", "==", "1", ":", "return", "0.0", "else", ":", "mn", "=", "mean", "(", "inlist", ")", "n", "=", "len", "(", "inlist", ")", "s", "=", "0", "for", "x", "in", "inl...
Calculates the nth moment about the mean for a sample (defaults to the 1st moment). Used to calculate coefficients of skewness and kurtosis. Usage: lmoment(inlist,moment=1) Returns: appropriate moment (r) from pyLibrary. 1/n * SUM((inlist(i)-mean)**r)
[ "Calculates", "the", "nth", "moment", "about", "the", "mean", "for", "a", "sample", "(", "defaults", "to", "the", "1st", "moment", ")", ".", "Used", "to", "calculate", "coefficients", "of", "skewness", "and", "kurtosis", "." ]
python
train
28.588235
bwohlberg/sporco
sporco/prox/_l1proj.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/prox/_l1proj.py#L79-L106
def _proj_l1_scalar_root(v, gamma): r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array """ if norm_l1(v) <= gamma: return v else: av = np.abs(v) fn = lambda t: np.sum(np.maximum(0, av - t)) - gamma t = optim.brentq(fn, 0, av.max()) return prox_l1(v, t)
[ "def", "_proj_l1_scalar_root", "(", "v", ",", "gamma", ")", ":", "if", "norm_l1", "(", "v", ")", "<=", "gamma", ":", "return", "v", "else", ":", "av", "=", "np", ".", "abs", "(", "v", ")", "fn", "=", "lambda", "t", ":", "np", ".", "sum", "(", ...
r"""Projection operator of the :math:`\ell_1` norm. The solution is computed via the method of Sec. 6.5.2 in :cite:`parikh-2014-proximal`. There is no `axis` parameter since the algorithm for computing the solution treats the input `v` as a single vector. Parameters ---------- v : array_like Input array :math:`\mathbf{v}` gamma : float Parameter :math:`\gamma` Returns ------- x : ndarray Output array
[ "r", "Projection", "operator", "of", "the", ":", "math", ":", "\\", "ell_1", "norm", ".", "The", "solution", "is", "computed", "via", "the", "method", "of", "Sec", ".", "6", ".", "5", ".", "2", "in", ":", "cite", ":", "parikh", "-", "2014", "-", ...
python
train
24.892857
open-homeautomation/pknx
knxip/conversion.py
https://github.com/open-homeautomation/pknx/blob/a8aed8271563923c447aa330ba7c1c2927286f7a/knxip/conversion.py#L83-L95
def knx_to_date(knxdata): """Convert a 3 byte KNX data object to a date""" if len(knxdata) != 3: raise KNXException("Can only convert a 3 Byte object to date") year = knxdata[2] if year >= 90: year += 1900 else: year += 2000 return date(year, knxdata[1], knxdata[0])
[ "def", "knx_to_date", "(", "knxdata", ")", ":", "if", "len", "(", "knxdata", ")", "!=", "3", ":", "raise", "KNXException", "(", "\"Can only convert a 3 Byte object to date\"", ")", "year", "=", "knxdata", "[", "2", "]", "if", "year", ">=", "90", ":", "year...
Convert a 3 byte KNX data object to a date
[ "Convert", "a", "3", "byte", "KNX", "data", "object", "to", "a", "date" ]
python
train
23.461538
arviz-devs/arviz
arviz/data/datasets.py
https://github.com/arviz-devs/arviz/blob/d04d8da07f029fd2931f48d2f7f324cf393e5277/arviz/data/datasets.py#L68-L93
def get_data_home(data_home=None): """Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir. """ if data_home is None: data_home = os.environ.get("ARVIZ_DATA", os.path.join("~", "arviz_data")) data_home = os.path.expanduser(data_home) if not os.path.exists(data_home): os.makedirs(data_home) return data_home
[ "def", "get_data_home", "(", "data_home", "=", "None", ")", ":", "if", "data_home", "is", "None", ":", "data_home", "=", "os", ".", "environ", ".", "get", "(", "\"ARVIZ_DATA\"", ",", "os", ".", "path", ".", "join", "(", "\"~\"", ",", "\"arviz_data\"", ...
Return the path of the arviz data dir. This folder is used by some dataset loaders to avoid downloading the data several times. By default the data dir is set to a folder named 'arviz_data' in the user home folder. Alternatively, it can be set by the 'ARVIZ_DATA' environment variable or programmatically by giving an explicit folder path. The '~' symbol is expanded to the user home folder. If the folder does not already exist, it is automatically created. Parameters ---------- data_home : str | None The path to arviz data dir.
[ "Return", "the", "path", "of", "the", "arviz", "data", "dir", "." ]
python
train
32.961538
deepmipt/DeepPavlov
deeppavlov/utils/alexa/bot.py
https://github.com/deepmipt/DeepPavlov/blob/f3e4a69a3764d25d2f5bad4f1f1aebc872b00f9c/deeppavlov/utils/alexa/bot.py#L74-L79
def run(self) -> None: """Thread run method implementation.""" while True: request = self.input_queue.get() response = self._handle_request(request) self.output_queue.put(response)
[ "def", "run", "(", "self", ")", "->", "None", ":", "while", "True", ":", "request", "=", "self", ".", "input_queue", ".", "get", "(", ")", "response", "=", "self", ".", "_handle_request", "(", "request", ")", "self", ".", "output_queue", ".", "put", ...
Thread run method implementation.
[ "Thread", "run", "method", "implementation", "." ]
python
test
37.833333
jaraco/jaraco.functools
jaraco/functools.py
https://github.com/jaraco/jaraco.functools/blob/cc972095e5aa2ae80d1d69d7ca84ee94178e869a/jaraco/functools.py#L426-L468
def save_method_args(method): """ Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args () """ args_and_kwargs = collections.namedtuple('args_and_kwargs', 'args kwargs') @functools.wraps(method) def wrapper(self, *args, **kwargs): attr_name = '_saved_' + method.__name__ attr = args_and_kwargs(args, kwargs) setattr(self, attr_name, attr) return method(self, *args, **kwargs) return wrapper
[ "def", "save_method_args", "(", "method", ")", ":", "args_and_kwargs", "=", "collections", ".", "namedtuple", "(", "'args_and_kwargs'", ",", "'args kwargs'", ")", "@", "functools", ".", "wraps", "(", "method", ")", "def", "wrapper", "(", "self", ",", "*", "a...
Wrap a method such that when it is called, the args and kwargs are saved on the method. >>> class MyClass: ... @save_method_args ... def method(self, a, b): ... print(a, b) >>> my_ob = MyClass() >>> my_ob.method(1, 2) 1 2 >>> my_ob._saved_method.args (1, 2) >>> my_ob._saved_method.kwargs {} >>> my_ob.method(a=3, b='foo') 3 foo >>> my_ob._saved_method.args () >>> my_ob._saved_method.kwargs == dict(a=3, b='foo') True The arguments are stored on the instance, allowing for different instance to save different args. >>> your_ob = MyClass() >>> your_ob.method({str('x'): 3}, b=[4]) {'x': 3} [4] >>> your_ob._saved_method.args ({'x': 3},) >>> my_ob._saved_method.args ()
[ "Wrap", "a", "method", "such", "that", "when", "it", "is", "called", "the", "args", "and", "kwargs", "are", "saved", "on", "the", "method", "." ]
python
train
23.790698
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L579-L614
def subseparable_conv(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution. If separability == 0 it's a separable_conv.""" def conv_fn(inputs, filters, kernel_size, **kwargs): """Sub-separable convolution, splits into separability-many blocks.""" separability = None if "separability" in kwargs: separability = kwargs.pop("separability") if separability: parts = [] abs_sep = separability if separability > 0 else -1 * separability for split_idx, split in enumerate(tf.split(inputs, abs_sep, axis=3)): with tf.variable_scope("part_%d" % split_idx): if separability > 0: parts.append( layers().Conv2D(filters // separability, kernel_size, **kwargs)(split)) else: parts.append( layers().SeparableConv2D(filters // abs_sep, kernel_size, **kwargs)(split)) if separability > 1: result = layers().Conv2D(filters, (1, 1))(tf.concat(parts, axis=3)) elif abs_sep == 1: # If we have just one block, return it. assert len(parts) == 1 result = parts[0] else: result = tf.concat(parts, axis=3) else: result = layers().SeparableConv2D(filters, kernel_size, **kwargs)(inputs) if separability is not None: kwargs["separability"] = separability return result return conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs)
[ "def", "subseparable_conv", "(", "inputs", ",", "filters", ",", "kernel_size", ",", "*", "*", "kwargs", ")", ":", "def", "conv_fn", "(", "inputs", ",", "filters", ",", "kernel_size", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Sub-separable convolution, splits i...
Sub-separable convolution. If separability == 0 it's a separable_conv.
[ "Sub", "-", "separable", "convolution", ".", "If", "separability", "==", "0", "it", "s", "a", "separable_conv", "." ]
python
train
42.027778
Kortemme-Lab/klab
klab/box_backup.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/box_backup.py#L665-L692
def read_sha1( file_path, buf_size = None, start_byte = 0, read_size = None, extra_hashers = [], # update(data) will be called on all of these ): ''' Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory ''' read_size = read_size or os.stat(file_path).st_size buf_size = buf_size or DEFAULT_BUFFER_SIZE data_read = 0 total_sha1 = hashlib.sha1() while data_read < read_size: with open( file_path, 'rb', buffering = 0 ) as f: f.seek( start_byte ) data = f.read( min(buf_size, read_size - data_read) ) assert( len(data) > 0 ) total_sha1.update( data ) for hasher in extra_hashers: hasher.update( data ) data_read += len(data) start_byte += len(data) assert( data_read == read_size ) return total_sha1
[ "def", "read_sha1", "(", "file_path", ",", "buf_size", "=", "None", ",", "start_byte", "=", "0", ",", "read_size", "=", "None", ",", "extra_hashers", "=", "[", "]", ",", "# update(data) will be called on all of these", ")", ":", "read_size", "=", "read_size", ...
Determines the sha1 hash of a file in chunks, to prevent loading the entire file at once into memory
[ "Determines", "the", "sha1", "hash", "of", "a", "file", "in", "chunks", "to", "prevent", "loading", "the", "entire", "file", "at", "once", "into", "memory" ]
python
train
31.678571
bitesofcode/projexui
projexui/widgets/xdocktoolbar.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xdocktoolbar.py#L313-L322
def labelForAction(self, action): """ Returns the label that contains the inputed action. :return <XDockActionLabel> || None """ for label in self.actionLabels(): if label.action() == action: return label return None
[ "def", "labelForAction", "(", "self", ",", "action", ")", ":", "for", "label", "in", "self", ".", "actionLabels", "(", ")", ":", "if", "label", ".", "action", "(", ")", "==", "action", ":", "return", "label", "return", "None" ]
Returns the label that contains the inputed action. :return <XDockActionLabel> || None
[ "Returns", "the", "label", "that", "contains", "the", "inputed", "action", ".", ":", "return", "<XDockActionLabel", ">", "||", "None" ]
python
train
30.5
wbond/asn1crypto
asn1crypto/core.py
https://github.com/wbond/asn1crypto/blob/ecda20176f55d37021cbca1f6da9083a8e491197/asn1crypto/core.py#L3476-L3521
def _determine_spec(self, index): """ Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback """ name, field_spec, field_params = self._fields[index] value_spec = field_spec spec_override = None if self._spec_callbacks is not None and name in self._spec_callbacks: callback = self._spec_callbacks[name] spec_override = callback(self) if spec_override: # Allow a spec callback to specify both the base spec and # the override, for situations such as OctetString and parse_as if spec_override.__class__ == tuple and len(spec_override) == 2: field_spec, value_spec = spec_override if value_spec is None: value_spec = field_spec spec_override = None # When no field spec is specified, use a single return value as that elif field_spec is None: field_spec = spec_override value_spec = field_spec spec_override = None else: value_spec = spec_override elif self._oid_nums is not None and self._oid_nums[1] == index: oid = self._lazy_child(self._oid_nums[0]).native if oid in self._oid_specs: spec_override = self._oid_specs[oid] value_spec = spec_override return (name, field_spec, value_spec, field_params, spec_override)
[ "def", "_determine_spec", "(", "self", ",", "index", ")", ":", "name", ",", "field_spec", ",", "field_params", "=", "self", ".", "_fields", "[", "index", "]", "value_spec", "=", "field_spec", "spec_override", "=", "None", "if", "self", ".", "_spec_callbacks"...
Determine how a value for a field should be constructed :param index: The field number :return: A tuple containing the following elements: - unicode string of the field name - Asn1Value class of the field spec - Asn1Value class of the value spec - None or dict of params to pass to the field spec - None or Asn1Value class indicating the value spec was derived from an OID or a spec callback
[ "Determine", "how", "a", "value", "for", "a", "field", "should", "be", "constructed" ]
python
train
42.304348
thomasbiddle/Kippt-for-Python
kippt/clips.py
https://github.com/thomasbiddle/Kippt-for-Python/blob/dddd0ff84d70ccf2d84e50e3cff7aad89f9c1267/kippt/clips.py#L150-L165
def update(self, **args): """ Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md """ # JSONify our data. data = json.dumps(args) r = requests.put( "https://kippt.com/api/clips/%s" % (self.id), headers=self.kippt.header, data=data) return (r.json())
[ "def", "update", "(", "self", ",", "*", "*", "args", ")", ":", "# JSONify our data.", "data", "=", "json", ".", "dumps", "(", "args", ")", "r", "=", "requests", ".", "put", "(", "\"https://kippt.com/api/clips/%s\"", "%", "(", "self", ".", "id", ")", ",...
Updates a Clip. Parameters: - args Dictionary of other fields Accepted fields can be found here: https://github.com/kippt/api-documentation/blob/master/objects/clip.md
[ "Updates", "a", "Clip", "." ]
python
train
29.5
jgorset/django-respite
respite/decorators.py
https://github.com/jgorset/django-respite/blob/719469d11baf91d05917bab1623bd82adc543546/respite/decorators.py#L9-L22
def override_supported_formats(formats): """ Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``. """ def decorator(function): @wraps(function) def wrapper(self, *args, **kwargs): self.supported_formats = formats return function(self, *args, **kwargs) return wrapper return decorator
[ "def", "override_supported_formats", "(", "formats", ")", ":", "def", "decorator", "(", "function", ")", ":", "@", "wraps", "(", "function", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "supporte...
Override the views class' supported formats for the decorated function. Arguments: formats -- A list of strings describing formats, e.g. ``['html', 'json']``.
[ "Override", "the", "views", "class", "supported", "formats", "for", "the", "decorated", "function", "." ]
python
train
32.357143
oscarlazoarjona/fast
fast/atomic_structure.py
https://github.com/oscarlazoarjona/fast/blob/3e5400672af2a7b7cc616e7f4aa10d7672720222/fast/atomic_structure.py#L1813-L1841
def calculate_reduced_matrix_elements(fine_states, convention=1): r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868 """ reduced_matrix_elements = [[reduced_matrix_element(ei, ej, convention=convention) for ej in fine_states] for ei in fine_states] return reduced_matrix_elements
[ "def", "calculate_reduced_matrix_elements", "(", "fine_states", ",", "convention", "=", "1", ")", ":", "reduced_matrix_elements", "=", "[", "[", "reduced_matrix_element", "(", "ei", ",", "ej", ",", "convention", "=", "convention", ")", "for", "ej", "in", "fine_s...
r"""Calculate the reduced matrix elements for a list of fine states. This function calculates the reduced matrix elments .. math:: \langle N,L,J||T^1(r)||N',L',J'\rangle given a list of fine states. We calculate the reduced matrix elements found in [SteckRb87]_ for the \ D1 and D2 lines in rubidium. >>> g = State("Rb", 87, 5, 0, 1/Integer(2)) >>> e1 = State("Rb", 87, 5, 1, 1/Integer(2)) >>> e2 = State("Rb", 87,5 , 1, 3/Integer(2)) >>> red = calculate_reduced_matrix_elements([g, e1, e2], convention=2) >>> print(red[0][1]) 2.99207750426 >>> print(red[0][2]) 4.22698361868
[ "r", "Calculate", "the", "reduced", "matrix", "elements", "for", "a", "list", "of", "fine", "states", "." ]
python
train
33.689655
geertj/gruvi
lib/gruvi/util.py
https://github.com/geertj/gruvi/blob/1d77ca439600b6ea7a19aa1ee85dca0f3be3f3f8/lib/gruvi/util.py#L94-L143
def delegate_method(other, method, name=None): """Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method) """ frame = sys._getframe(1) classdict = frame.f_locals @functools.wraps(method) def delegate(self, *args, **kwargs): other_self = other.__get__(self) return method(other_self, *args, **kwargs) if getattr(method, '__switchpoint__', False): delegate.__switchpoint__ = True if name is None: name = method.__name__ propname = None for key in classdict: if classdict[key] is other: propname = key break # If we know the property name, replace the docstring with a small # reference instead of copying the function docstring. if propname: qname = getattr(method, '__qualname__', method.__name__) if '.' in qname: delegate.__doc__ = 'A shorthand for ``self.{propname}.{name}()``.' \ .format(name=name, propname=propname) else: delegate.__doc__ = 'A shorthand for ``{name}({propname}, ...)``.' \ .format(name=name, propname=propname) classdict[name] = delegate
[ "def", "delegate_method", "(", "other", ",", "method", ",", "name", "=", "None", ")", ":", "frame", "=", "sys", ".", "_getframe", "(", "1", ")", "classdict", "=", "frame", ".", "f_locals", "@", "functools", ".", "wraps", "(", "method", ")", "def", "d...
Add a method to the current class that delegates to another method. The *other* argument must be a property that returns the instance to delegate to. Due to an implementation detail, the property must be defined in the current class. The *method* argument specifies a method to delegate to. It can be any callable as long as it takes the instances as its first argument. It is a common paradigm in Gruvi to expose protocol methods onto clients. This keeps most of the logic into the protocol, but prevents the user from having to type ``'client.protocol.*methodname*'`` all the time. For example:: class MyClient(Client): protocol = Client.protocol delegate_method(protocol, MyProtocol.method)
[ "Add", "a", "method", "to", "the", "current", "class", "that", "delegates", "to", "another", "method", "." ]
python
train
36.98
saltstack/salt
salt/utils/jinja.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/utils/jinja.py#L76-L83
def file_client(self): ''' Return a file client. Instantiates on first call. ''' if not self._file_client: self._file_client = salt.fileclient.get_file_client( self.opts, self.pillar_rend) return self._file_client
[ "def", "file_client", "(", "self", ")", ":", "if", "not", "self", ".", "_file_client", ":", "self", ".", "_file_client", "=", "salt", ".", "fileclient", ".", "get_file_client", "(", "self", ".", "opts", ",", "self", ".", "pillar_rend", ")", "return", "se...
Return a file client. Instantiates on first call.
[ "Return", "a", "file", "client", ".", "Instantiates", "on", "first", "call", "." ]
python
train
34.25
maxpumperla/hyperas
hyperas/utils.py
https://github.com/maxpumperla/hyperas/blob/6bf236682e2aa28401e48f73ebef269ad2937ae2/hyperas/utils.py#L154-L168
def unpack_hyperopt_vals(vals): """ Unpack values from a hyperopt return dictionary where values are wrapped in a list. :param vals: dict :return: dict copy of the dictionary with unpacked values """ assert isinstance(vals, dict), "Parameter must be given as dict." ret = {} for k, v in list(vals.items()): try: ret[k] = v[0] except (TypeError, IndexError): ret[k] = v return ret
[ "def", "unpack_hyperopt_vals", "(", "vals", ")", ":", "assert", "isinstance", "(", "vals", ",", "dict", ")", ",", "\"Parameter must be given as dict.\"", "ret", "=", "{", "}", "for", "k", ",", "v", "in", "list", "(", "vals", ".", "items", "(", ")", ")", ...
Unpack values from a hyperopt return dictionary where values are wrapped in a list. :param vals: dict :return: dict copy of the dictionary with unpacked values
[ "Unpack", "values", "from", "a", "hyperopt", "return", "dictionary", "where", "values", "are", "wrapped", "in", "a", "list", ".", ":", "param", "vals", ":", "dict", ":", "return", ":", "dict", "copy", "of", "the", "dictionary", "with", "unpacked", "values"...
python
train
29.933333
mozilla/python_moztelemetry
moztelemetry/shared_telemetry_utils.py
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/shared_telemetry_utils.py#L74-L110
def writeDefinition(self, f, name):
    """Writes the string table to a file as a C const char array.

    This writes out the string table as one single C char array for memory
    size reasons, separating the individual strings with '\\0' characters.
    This way we can index directly into the string array and avoid the
    additional storage costs for the pointers to them (and potential extra
    relocations for those).

    :param f: the output stream.
    :param name: the name of the output array.
    :raises ValueError: if any stored string contains "*/", which would
        terminate the generated C comment early.
    """
    # Sort by offset so the entries march through the array in order.
    # sorted() also works on the dict view under Python 3, where
    # dict.items() no longer returns a sortable list.
    entries = sorted(self.table.items(), key=lambda x: x[1])

    # Avoid null-in-string warnings with GCC and potentially
    # overlong string constants; write everything out the long way.
    def explodeToCharArray(string):
        def toCChar(s):
            if s == "'":
                return "'\\''"
            else:
                return "'%s'" % s
        return ", ".join(map(toCChar, string))

    f.write("const char %s[] = {\n" % name)
    for (string, offset) in entries:
        if "*/" in string:
            raise ValueError("String in string table contains unexpected sequence '*/': %s" % string)
        e = explodeToCharArray(string)
        if e:
            # Reuse the already-computed explosion instead of calling
            # explodeToCharArray() a second time for the same string.
            f.write(" /* %5d - \"%s\" */ %s, '\\0',\n" % (offset, string, e))
        else:
            # Empty string: just the terminating NUL.
            f.write(" /* %5d - \"%s\" */ '\\0',\n" % (offset, string))
    f.write("};\n\n")
[ "def", "writeDefinition", "(", "self", ",", "f", ",", "name", ")", ":", "entries", "=", "self", ".", "table", ".", "items", "(", ")", "entries", ".", "sort", "(", "key", "=", "lambda", "x", ":", "x", "[", "1", "]", ")", "# Avoid null-in-string warnin...
Writes the string table to a file as a C const char array. This writes out the string table as one single C char array for memory size reasons, separating the individual strings with '\0' characters. This way we can index directly into the string array and avoid the additional storage costs for the pointers to them (and potential extra relocations for those). :param f: the output stream. :param name: the name of the output array.
[ "Writes", "the", "string", "table", "to", "a", "file", "as", "a", "C", "const", "char", "array", "." ]
python
train
41.594595
elastic/elasticsearch-py
elasticsearch/client/xpack/ilm.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/ilm.py#L70-L78
def remove_policy(self, index=None, params=None):
    """Remove the index lifecycle management policy from an index.

    `<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_

    :arg index: The name of the index to remove policy on
    """
    path = _make_path(index, "_ilm", "remove")
    return self.transport.perform_request("POST", path, params=params)
[ "def", "remove_policy", "(", "self", ",", "index", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "\"POST\"", ",", "_make_path", "(", "index", ",", "\"_ilm\"", ",", "\"remove\"", ")", "...
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/ilm-remove-policy.html>`_ :arg index: The name of the index to remove policy on
[ "<https", ":", "//", "www", ".", "elastic", ".", "co", "/", "guide", "/", "en", "/", "elasticsearch", "/", "reference", "/", "current", "/", "ilm", "-", "remove", "-", "policy", ".", "html", ">", "_" ]
python
train
39.555556
mikedh/trimesh
trimesh/exchange/urdf.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/urdf.py#L10-L163
def export_urdf(mesh,
                directory,
                scale=1.0,
                color=[0.75, 0.75, 0.75],
                **kwargs):
    """
    Convert a Trimesh object into a URDF package for physics simulation.
    This breaks the mesh into convex pieces and writes them to the same
    directory as the .urdf file.

    Parameters
    ---------
    mesh : Trimesh object
    directory : str
      The directory path for the URDF package
    scale : float
      Uniform scale factor written into the URDF geometry entries
    color : (3,) float
      RGB diffuse color written into the visual material
    kwargs : dict
      Passed through to the convex decomposition backend

    Returns
    ---------
    mesh : Trimesh object
      Multi-body mesh containing convex decomposition
    """

    import lxml.etree as et
    # TODO: fix circular import
    from .export import export_mesh

    # Extract the save directory and the file name
    fullpath = os.path.abspath(directory)
    name = os.path.basename(fullpath)
    _, ext = os.path.splitext(name)

    # The package name is the directory's basename, so it must not
    # carry a file extension.
    if ext != '':
        raise ValueError('URDF path must be a directory!')

    # Create directory if needed
    if not os.path.exists(fullpath):
        os.mkdir(fullpath)
    elif not os.path.isdir(fullpath):
        raise ValueError('URDF path must be a directory!')

    # Perform a convex decomposition; fall back to the convex hull
    # if the decomposition backend fails for any reason.
    try:
        convex_pieces = convex_decomposition(mesh, **kwargs)
        if not isinstance(convex_pieces, list):
            convex_pieces = [convex_pieces]
    except BaseException:
        log.error('problem with convex decomposition, using hull',
                  exc_info=True)
        convex_pieces = [mesh.convex_hull]

    # Get the effective density of the mesh: the convex pieces overlap,
    # so per-piece density is scaled down to preserve the total mass.
    effective_density = mesh.volume / sum([
        m.volume for m in convex_pieces])

    # open an XML tree
    root = et.Element('robot', name='root')

    # Loop through all pieces, adding each as a link
    prev_link_name = None
    for i, piece in enumerate(convex_pieces):

        # Save each nearly convex mesh out to a file
        piece_name = '{}_convex_piece_{}'.format(name, i)
        piece_filename = '{}.obj'.format(piece_name)
        piece_filepath = os.path.join(fullpath, piece_filename)
        export_mesh(piece, piece_filepath)

        # Set the mass properties of the piece
        piece.center_mass = mesh.center_mass
        piece.density = effective_density * mesh.density

        link_name = 'link_{}'.format(piece_name)
        geom_name = '{}'.format(piece_filename)
        # Pre-format the 3x3 inertia tensor entries for the XML attributes.
        I = [['{:.2E}'.format(y) for y in x]  # NOQA
             for x in piece.moment_inertia]

        # Write the link out to the XML Tree
        link = et.SubElement(root, 'link', name=link_name)

        # Inertial information
        inertial = et.SubElement(link, 'inertial')
        et.SubElement(inertial, 'origin', xyz="0 0 0", rpy="0 0 0")
        et.SubElement(inertial, 'mass', value='{:.2E}'.format(piece.mass))
        # Only the upper triangle is needed: the tensor is symmetric.
        et.SubElement(
            inertial,
            'inertia',
            ixx=I[0][0],
            ixy=I[0][1],
            ixz=I[0][2],
            iyy=I[1][1],
            iyz=I[1][2],
            izz=I[2][2])
        # Visual Information
        visual = et.SubElement(link, 'visual')
        et.SubElement(visual, 'origin', xyz="0 0 0", rpy="0 0 0")
        geometry = et.SubElement(visual, 'geometry')
        et.SubElement(geometry, 'mesh', filename=geom_name,
                      scale="{:.4E} {:.4E} {:.4E}".format(scale,
                                                          scale,
                                                          scale))
        material = et.SubElement(visual, 'material', name='')
        et.SubElement(material, 'color',
                      rgba="{:.2E} {:.2E} {:.2E} 1".format(color[0],
                                                           color[1],
                                                           color[2]))

        # Collision Information (same mesh as the visual geometry)
        collision = et.SubElement(link, 'collision')
        et.SubElement(collision, 'origin', xyz="0 0 0", rpy="0 0 0")
        geometry = et.SubElement(collision, 'geometry')
        et.SubElement(geometry, 'mesh', filename=geom_name,
                      scale="{:.4E} {:.4E} {:.4E}".format(scale,
                                                          scale,
                                                          scale))

        # Create rigid joint to previous link so all pieces move as one body
        if prev_link_name is not None:
            joint_name = '{}_joint'.format(link_name)
            joint = et.SubElement(root,
                                  'joint',
                                  name=joint_name,
                                  type='fixed')
            et.SubElement(joint, 'origin', xyz="0 0 0", rpy="0 0 0")
            et.SubElement(joint, 'parent', link=prev_link_name)
            et.SubElement(joint, 'child', link=link_name)
        prev_link_name = link_name

    # Write URDF file
    tree = et.ElementTree(root)
    urdf_filename = '{}.urdf'.format(name)
    tree.write(os.path.join(fullpath, urdf_filename),
               pretty_print=True)

    # Write Gazebo config file (model.config manifest next to the URDF)
    root = et.Element('model')
    model = et.SubElement(root, 'name')
    model.text = name
    version = et.SubElement(root, 'version')
    version.text = '1.0'
    sdf = et.SubElement(root, 'sdf', version='1.4')
    sdf.text = '{}.urdf'.format(name)

    author = et.SubElement(root, 'author')
    et.SubElement(author, 'name').text = 'trimesh {}'.format(trimesh_version)
    et.SubElement(author, 'email').text = 'blank@blank.blank'

    description = et.SubElement(root, 'description')
    description.text = name
    tree = et.ElementTree(root)
    tree.write(os.path.join(fullpath, 'model.config'))

    # NOTE(review): np.sum over a list of Trimesh objects presumably relies
    # on Trimesh.__add__ concatenating meshes -- confirm against trimesh.
    return np.sum(convex_pieces)
[ "def", "export_urdf", "(", "mesh", ",", "directory", ",", "scale", "=", "1.0", ",", "color", "=", "[", "0.75", ",", "0.75", ",", "0.75", "]", ",", "*", "*", "kwargs", ")", ":", "import", "lxml", ".", "etree", "as", "et", "# TODO: fix circular import", ...
Convert a Trimesh object into a URDF package for physics simulation. This breaks the mesh into convex pieces and writes them to the same directory as the .urdf file. Parameters --------- mesh : Trimesh object directory : str The directory path for the URDF package Returns --------- mesh : Trimesh object Multi-body mesh containing convex decomposition
[ "Convert", "a", "Trimesh", "object", "into", "a", "URDF", "package", "for", "physics", "simulation", ".", "This", "breaks", "the", "mesh", "into", "convex", "pieces", "and", "writes", "them", "to", "the", "same", "directory", "as", "the", ".", "urdf", "fil...
python
train
35.675325
capnproto/pycapnp
buildutils/bundle.py
https://github.com/capnproto/pycapnp/blob/cb3f190b955bdb1bfb6e0ac0b2f9306a5c79f7b5/buildutils/bundle.py#L53-L56
def localpath(*args):
    """construct an absolute path from a list relative to the root pycapnp directory"""
    parts = [ROOT]
    parts.extend(args)
    return os.path.abspath(pjoin(*parts))
[ "def", "localpath", "(", "*", "args", ")", ":", "plist", "=", "[", "ROOT", "]", "+", "list", "(", "args", ")", "return", "os", ".", "path", ".", "abspath", "(", "pjoin", "(", "*", "plist", ")", ")" ]
construct an absolute path from a list relative to the root pycapnp directory
[ "construct", "an", "absolute", "path", "from", "a", "list", "relative", "to", "the", "root", "pycapnp", "directory" ]
python
train
45
pypa/pipenv
pipenv/patched/notpip/_internal/wheel.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/wheel.py#L790-L812
def format_command(
    command_args,  # type: List[str]
    command_output,  # type: str
):
    # type: (...) -> str
    """
    Format command information for logging.
    """
    lines = ['Command arguments: {}\n'.format(command_args)]

    if not command_output:
        lines.append('Command output: None')
    elif logger.getEffectiveLevel() > logging.DEBUG:
        # Output is suppressed unless verbose logging was requested.
        lines.append('Command output: [use --verbose to show]')
    else:
        if not command_output.endswith('\n'):
            command_output += '\n'
        lines.append(
            (
                'Command output:\n{}'
                '-----------------------------------------'
            ).format(command_output)
        )

    return ''.join(lines)
[ "def", "format_command", "(", "command_args", ",", "# type: List[str]", "command_output", ",", "# type: str", ")", ":", "# type: (...) -> str", "text", "=", "'Command arguments: {}\\n'", ".", "format", "(", "command_args", ")", "if", "not", "command_output", ":", "tex...
Format command information for logging.
[ "Format", "command", "information", "for", "logging", "." ]
python
train
27.826087
StorjOld/pyp2p
pyp2p/unl.py
https://github.com/StorjOld/pyp2p/blob/7024208c3af20511496a652ff212f54c420e0464/pyp2p/unl.py#L239-L373
def connect_handler(self, their_unl, events, force_master, hairpin, nonce):
    """Establish (or wait on) a connection to the node described by their_unl.

    Master status decides which side initiates when either could; this is
    used to eliminate duplicate connections between the same pair of hosts.

    :param their_unl: the remote node's UNL
    :param events: dict with optional "success"/"failure" callbacks, or
        None when this call is an undifferentiated duplicate
    :param force_master: treat ourselves as master regardless of UNL order
    :param hairpin: when both nodes share a WAN IP, assume the router
        cannot hairpin and mark both sides passive (direct LAN connect)
    :param nonce: 64-char nonce used to derive the connection ID, or
        "0" * 64 to skip con ID generation
    """
    # Figure out who should make the connection.
    our_unl = self.value.encode("ascii")
    their_unl = their_unl.encode("ascii")
    master = self.is_master(their_unl)

    # Master defines who connects if either side can. It's used to
    # eliminate having multiple connections with the same host.
    if force_master:
        master = 1

    # Deconstruct binary UNLs into dicts.
    our_unl = self.deconstruct(our_unl)
    their_unl = self.deconstruct(their_unl)
    if our_unl is None:
        raise Exception("Unable to deconstruct our UNL.")
    if their_unl is None:
        raise Exception("Unable to deconstruct their UNL.")

    # This means the nodes are behind the same router.
    if our_unl["wan_ip"] == their_unl["wan_ip"]:
        # Connect to LAN IP.
        our_unl["wan_ip"] = our_unl["lan_ip"]
        their_unl["wan_ip"] = their_unl["lan_ip"]

        # Already behind NAT so no forwarding needed.
        if hairpin:
            our_unl["node_type"] = "passive"
            their_unl["node_type"] = "passive"

    # Generate con ID.
    if nonce != "0" * 64:
        # Convert nonce to bytes.
        if sys.version_info >= (3, 0, 0):
            if type(nonce) == str:
                # NOTE(review): the encode() result is discarded here, so
                # nonce stays a str on Python 3. Left as-is because callers
                # may depend on it -- confirm whether
                # `nonce = nonce.encode("ascii")` was intended.
                nonce.encode("ascii")
        else:
            if type(nonce) == unicode:
                nonce = str(nonce)

        # Check nonce length.
        assert(len(nonce) == 64)

        # Create con ID.
        con_id = self.net.generate_con_id(
            nonce,
            our_unl["wan_ip"],
            their_unl["wan_ip"]
        )
    else:
        con_id = None

    # Acquire mutex.
    self.mutex.acquire()

    # Wait for other UNLs to finish (60s budget per pending UNL).
    end_time = time.time()
    end_time += len(self.pending_unls) * 60
    self.debug_print("Waiting for other unls to finish")
    while their_unl in self.pending_unls and time.time() < end_time:
        # This is an undifferentiated duplicate.
        if events is None:
            self.mutex.release()
            return

        time.sleep(1)
    self.debug_print("Other unl finished")

    # BUG FIX: initialize con before the try block. Previously, if
    # get_connection() raised, `con` was never bound and the events
    # handling below crashed with NameError instead of invoking the
    # "failure" callback.
    con = None
    is_exception = 0
    try:
        # Wait for any other hole punches to finish.
        if (their_unl["node_type"] == "simultaneous" and
                our_unl["node_type"] != "passive"):
            self.pending_sim_open.append(their_unl["value"])
            end_time = time.time()
            end_time += len(self.pending_unls) * 60
            self.debug_print("wait for other hole punches to finish")
            while len(self.pending_sim_open) and time.time() < end_time:
                if self.pending_sim_open[0] == their_unl["value"]:
                    break

                time.sleep(1)
            self.debug_print("other hole punches finished")

        # Set pending UNL.
        self.pending_unls.append(their_unl)

        # Release mutex.
        self.mutex.release()

        # Get connection.
        con = self.get_connection(
            our_unl,
            their_unl,
            master,
            nonce,
            force_master,
            con_id
        )
    except Exception as e:
        is_exception = 1
        print(e)
        print("EXCEPTION IN UNL.GET_CONNECTION")
        log_exception("error.log", parse_exception(e))
    finally:
        # Release mutex (only still held if we bailed before the
        # explicit release above).
        if self.mutex.locked() and is_exception:
            self.mutex.release()

        # Undo pending connect state.
        if their_unl in self.pending_unls:
            self.pending_unls.remove(their_unl)

        # Undo pending sim open.
        if len(self.pending_sim_open):
            if self.pending_sim_open[0] == their_unl["value"]:
                self.pending_sim_open = self.pending_sim_open[1:]

    # Only execute events if this function was called manually.
    if events is not None:
        # Success.
        if con is not None:
            if "success" in events:
                events["success"](con)

        # Failure.
        if con is None:
            if "failure" in events:
                events["failure"](con)
[ "def", "connect_handler", "(", "self", ",", "their_unl", ",", "events", ",", "force_master", ",", "hairpin", ",", "nonce", ")", ":", "# Figure out who should make the connection.\r", "our_unl", "=", "self", ".", "value", ".", "encode", "(", "\"ascii\"", ")", "th...
Master defines who connects if either side can. It's used to eliminate having multiple connections with the same host.
[ "Master", "defines", "who", "connects", "if", "either", "side", "can", ".", "It", "s", "used", "to", "eliminate", "having", "multiple", "connections", "with", "the", "same", "host", "." ]
python
train
33.651852
foliant-docs/foliantcontrib.init
foliant/cli/init/__init__.py
https://github.com/foliant-docs/foliantcontrib.init/blob/39aa38949b6270a750c800b79b4e71dd827f28d8/foliant/cli/init/__init__.py#L62-L161
def init(self, project_name='', template='base', quiet=False, debug=False):
    '''Generate new Foliant project.'''

    # Only surface warnings unless the user asked for debug output.
    self.logger.setLevel(DEBUG if debug else WARNING)

    self.logger.info('Project creation started.')

    self.logger.debug(f'Template: {template}')

    template_path = Path(template)

    # A template may be given either as a filesystem path or as the name
    # of one of the templates bundled with this package.
    if not template_path.exists():
        self.logger.debug(
            f'Template not found in {template_path}, looking in installed templates.'
        )

        installed_templates_path = Path(Path(__file__).parent / 'templates')

        installed_templates = [
            item.name for item in installed_templates_path.iterdir() if item.is_dir()
        ]

        self.logger.debug(f'Available templates: {installed_templates}')

        if template in installed_templates:
            self.logger.debug('Template found.')

        else:
            self.logger.debug('Template not found, asking for user input.')

            # Interactive fallback: let the user pick a bundled template;
            # Ctrl-C aborts project creation cleanly.
            try:
                template = prompt(
                    f'Please pick a template from {installed_templates}: ',
                    completer=WordCompleter(installed_templates),
                    validator=BuiltinTemplateValidator(installed_templates)
                )

            except KeyboardInterrupt:
                self.logger.warning('Project creation interrupted.')
                return

        template_path = installed_templates_path / template

    self.logger.debug(f'Template path: {template_path}')

    if not project_name:
        self.logger.debug('Project name not specified, asking for user input.')

        try:
            project_name = prompt('Enter the project name: ')

        except KeyboardInterrupt:
            self.logger.warning('Project creation interrupted.')
            return

    project_slug = slugify(project_name)
    project_path = Path(project_slug)

    # Values substituted into placeholders in the copied template files.
    properties = {
        'title': project_name,
        'slug': project_slug
    }

    self.logger.debug(f'Project properties: {properties}')

    result = None

    with spinner('Generating project', self.logger, quiet, debug):
        copytree(template_path, project_path)

        # Substitute placeholders only inside known text file types.
        text_types = '*.md', '*.yml', '*.txt', '*.py'

        # Flatten the per-pattern rglob generators into a single list.
        text_file_paths = reduce(
            lambda acc, matches: acc + [*matches],
            (project_path.rglob(text_type) for text_type in text_types),
            []
        )

        for text_file_path in text_file_paths:
            self.logger.debug(f'Processing content of {text_file_path}')
            replace_placeholders(text_file_path, properties)

        # File and directory names may contain placeholders as well.
        for item in project_path.rglob('*'):
            self.logger.debug(f'Processing name of {item}')
            item.rename(Template(item.as_posix()).safe_substitute(properties))

        result = project_path

    if result:
        self.logger.info(f'Result: {result}')

        if not quiet:
            print('─' * 20)
            print(f'Project "{project_name}" created in {result}')

        else:
            # Quiet mode: emit only the machine-readable result path.
            print(result)

    else:
        self.logger.critical('Project creation failed.')
        exit(1)
[ "def", "init", "(", "self", ",", "project_name", "=", "''", ",", "template", "=", "'base'", ",", "quiet", "=", "False", ",", "debug", "=", "False", ")", ":", "self", ".", "logger", ".", "setLevel", "(", "DEBUG", "if", "debug", "else", "WARNING", ")",...
Generate new Foliant project.
[ "Generate", "new", "Foliant", "project", "." ]
python
train
32.51
arne-cl/discoursegraphs
src/discoursegraphs/readwrite/exportxml.py
https://github.com/arne-cl/discoursegraphs/blob/842f0068a3190be2c75905754521b176b25a54fb/src/discoursegraphs/readwrite/exportxml.py#L701-L710
def get_sentence_id(self, element):
    """Return the ID of the sentence the given element belongs to.

    Falls back to the prefix of the element's own ID (the part before the
    first underscore) when the element is not a descendant of a
    <sentence> element; a warning is emitted in that case.

    :param element: an etree element
    :return: the sentence ID string
    """
    try:
        # next(iterator) works on both Python 2 and 3; the original
        # iterator.next() call was Python 2 only.
        sentence_elem = next(element.iterancestors('sentence'))
    except StopIteration:
        warnings.warn("<{}> element is not a descendant of a <sentence> "
                      "We'll try to extract the sentence ID from the "
                      "prefix of the element ID".format(element.tag))
        return self.get_element_id(element).split('_')[0]
    return self.get_element_id(sentence_elem)
[ "def", "get_sentence_id", "(", "self", ",", "element", ")", ":", "try", ":", "sentence_elem", "=", "element", ".", "iterancestors", "(", "'sentence'", ")", ".", "next", "(", ")", "except", "StopIteration", "as", "e", ":", "warnings", ".", "warn", "(", "\...
returns the ID of the sentence the given element belongs to.
[ "returns", "the", "ID", "of", "the", "sentence", "the", "given", "element", "belongs", "to", "." ]
python
train
55.7
bretth/woven
woven/deploy.py
https://github.com/bretth/woven/blob/ec1da7b401a335f43129e7115fe7a4d145649f1e/woven/deploy.py#L4-L19
def post_install_postgresql():
    """ example default hook for installing postgresql """
    # NOTE: Python 2 / Fabric deployment hook: runs remote shell commands
    # via fabric's sudo() and reads DB settings from the Django config.
    from django.conf import settings as s
    # warn_only: keep going even if individual commands fail (e.g. the
    # role or database already exists from a previous run).
    with settings(warn_only=True):
        sudo('/etc/init.d/postgresql-8.4 restart')
        # Set the postgres superuser password to the deploy password.
        sudo("""psql template1 -c "ALTER USER postgres with encrypted password '%s';" """% env.password, user='postgres')
        # Install the pgAdmin server-side admin functions.
        sudo("psql -f /usr/share/postgresql/8.4/contrib/adminpack.sql", user='postgres')
        # Create the project role and database only when Django is
        # configured to use PostgreSQL via psycopg2.
        if s.DATABASES['default']['ENGINE']=='django.db.backends.postgresql_psycopg2':
            sudo("""psql template1 -c "CREATE ROLE %s LOGIN with encrypted password '%s';" """% (s.DATABASES['default']['USER'],s.DATABASES['default']['PASSWORD']), user='postgres')
            # -T template0 gives a pristine template; -O sets the owner.
            sudo('createdb -T template0 -O %s %s'% (s.DATABASES['default']['USER'],s.DATABASES['default']['NAME']), user='postgres')
    print "* setup postgres user password with your '%s' password"% env.user
    print "* imported the adminpack"
    print "Post install setup of Postgresql complete!"
[ "def", "post_install_postgresql", "(", ")", ":", "from", "django", ".", "conf", "import", "settings", "as", "s", "with", "settings", "(", "warn_only", "=", "True", ")", ":", "sudo", "(", "'/etc/init.d/postgresql-8.4 restart'", ")", "sudo", "(", "\"\"\"psql templ...
example default hook for installing postgresql
[ "example", "default", "hook", "for", "installing", "postgresql" ]
python
train
62.8125
molmod/molmod
molmod/molecular_graphs.py
https://github.com/molmod/molmod/blob/a7b5b4364ed514ad4c465856c05b5eda1cb561e0/molmod/molecular_graphs.py#L298-L341
def add_hydrogens(self, formal_charges=None):
    """Returns a molecular graph where hydrogens are added explicitely

    When the bond order is unknown, it assumes bond order one. If the
    graph has an attribute formal_charges, this routine will take it into
    account when counting the number of hydrogens to be added. The
    returned graph will also have a formal_charges attribute.

    This routine only adds hydrogen atoms for a limited set of atoms from
    the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br.
    """
    new_edges = list(self.edges)
    counter = self.num_vertices
    for i in range(self.num_vertices):
        num_elec = self.numbers[i]
        # Formal charge shifts the effective electron count of the atom.
        if formal_charges is not None:
            num_elec -= int(formal_charges[i])
        # Electron counts 5..9 correspond to B..F, 13..17 to Al..Cl and
        # 35 to Br; any other element is skipped (no hydrogens added).
        # The arithmetic yields the number of valence electrons
        # (num_elec - 2 for row two, num_elec - 10 for row three).
        if num_elec >= 5 and num_elec <= 9:
            num_hydrogen = num_elec - 10 + 8
        elif num_elec >= 13 and num_elec <= 17:
            num_hydrogen = num_elec - 18 + 8
        elif num_elec == 35:
            num_hydrogen = 1
        else:
            continue
        if num_hydrogen > 4:
            # More than four valence electrons: bonding capacity drops to
            # the octet complement (presumably lone pairs) -- 8 - valence.
            num_hydrogen = 8 - num_hydrogen
        # Subtract the orders of existing bonds; unknown or non-positive
        # orders are treated as single bonds.
        for n in self.neighbors[i]:
            bo = self.orders[self.edge_index[frozenset([i, n])]]
            if bo <= 0:
                bo = 1
            num_hydrogen -= int(bo)
        # Attach the remaining hydrogens as brand-new vertices.
        for j in range(num_hydrogen):
            new_edges.append((i, counter))
            counter += 1
    new_numbers = np.zeros(counter, int)
    new_numbers[:self.num_vertices] = self.numbers
    # All appended atoms are hydrogens (atomic number 1).
    new_numbers[self.num_vertices:] = 1
    new_orders = np.zeros(len(new_edges), int)
    new_orders[:self.num_edges] = self.orders
    # New X-H bonds get order one.
    new_orders[self.num_edges:] = 1
    # NOTE(review): the docstring promises a formal_charges attribute on
    # the returned graph, but none is set here -- confirm against callers.
    result = MolecularGraph(new_edges, new_numbers, new_orders)
    return result
[ "def", "add_hydrogens", "(", "self", ",", "formal_charges", "=", "None", ")", ":", "new_edges", "=", "list", "(", "self", ".", "edges", ")", "counter", "=", "self", ".", "num_vertices", "for", "i", "in", "range", "(", "self", ".", "num_vertices", ")", ...
Returns a molecular graph where hydrogens are added explicitely When the bond order is unknown, it assumes bond order one. If the graph has an attribute formal_charges, this routine will take it into account when counting the number of hydrogens to be added. The returned graph will also have a formal_charges attribute. This routine only adds hydrogen atoms for a limited set of atoms from the periodic system: B, C, N, O, F, Al, Si, P, S, Cl, Br.
[ "Returns", "a", "molecular", "graph", "where", "hydrogens", "are", "added", "explicitely" ]
python
train
42.681818
cltk/cltk
cltk/corpus/sanskrit/itrans/unicode_transliterate.py
https://github.com/cltk/cltk/blob/ed9c025b7ec43c949481173251b70e05e4dffd27/cltk/corpus/sanskrit/itrans/unicode_transliterate.py#L76-L115
def transliterate(text,lang1_code,lang2_code):
    """
    convert the source language script (lang1) to target language script (lang2)

    text: text to transliterate
    lang1_code: language 1 code
    lang1_code: language 2 code

    Text is returned unchanged when either language code has no known
    script range.
    """
    if (lang1_code not in langinfo.SCRIPT_RANGES) or \
            (lang2_code not in langinfo.SCRIPT_RANGES):
        return text

    # Sinhala has no direct mapping: route it through Devanagari.
    if lang1_code=='si':
        text=sdt.sinhala_to_devanagari(text)
        lang1_code='hi'

    # When Sinhala is the target, transliterate to Devanagari first and
    # remember that a final Devanagari->Sinhala pass is required.
    org_lang2_code=''
    if lang2_code=='si':
        lang2_code='hi'
        org_lang2_code='si'

    src_base = langinfo.SCRIPT_RANGES[lang1_code][0]
    dst_base = langinfo.SCRIPT_RANGES[lang2_code][0]

    out_chars = []
    for ch in text:
        offset = ord(ch) - src_base
        # Only characters inside the coordinated range are remapped;
        # everything else (punctuation, foreign script) passes through.
        if (langinfo.COORDINATED_RANGE_START_INCLUSIVE <= offset
                <= langinfo.COORDINATED_RANGE_END_INCLUSIVE):
            if lang2_code=='ta':
                # tamil exceptions
                offset = UnicodeIndicTransliterator._correct_tamil_mapping(offset)
            out_chars.append(py23char(dst_base + offset))
        else:
            out_chars.append(ch)

    result = ''.join(out_chars)
    if org_lang2_code=='si':
        result = sdt.devanagari_to_sinhala(result)
    return result
[ "def", "transliterate", "(", "text", ",", "lang1_code", ",", "lang2_code", ")", ":", "if", "(", "lang1_code", "in", "langinfo", ".", "SCRIPT_RANGES", ")", "and", "(", "lang2_code", "in", "langinfo", ".", "SCRIPT_RANGES", ")", ":", "# if Sinhala is source, do a m...
convert the source language script (lang1) to target language script (lang2) text: text to transliterate lang1_code: language 1 code lang1_code: language 2 code
[ "convert", "the", "source", "language", "script", "(", "lang1", ")", "to", "target", "language", "script", "(", "lang2", ")" ]
python
train
39.6
emc-openstack/storops
storops/unity/resource/lun.py
https://github.com/emc-openstack/storops/blob/24b4b13bf065c0ef0538dd0b5ebb8f25d24176bd/storops/unity/resource/lun.py#L415-L437
def replicate(self, dst_lun_id, max_time_out_of_sync,
              replication_name=None, replicate_existing_snaps=None,
              remote_system=None):
    """
    Create a replication session using an existing lun as the destination.

    :param dst_lun_id: destination lun id.
    :param max_time_out_of_sync: maximum time to wait before syncing the
        source and destination. Value `-1` means the automatic sync is not
        performed. `0` means it is a sync replication.
    :param replication_name: replication name.
    :param replicate_existing_snaps: whether to replicate existing snaps.
    :param remote_system: `UnityRemoteSystem` object. The remote system to
        which the replication is being configured. When not specified, it
        defaults to local system.
    :return: created replication session.
    """
    src_lun_id = self.get_id()
    return UnityReplicationSession.create(
        self._cli,
        src_lun_id,
        dst_lun_id,
        max_time_out_of_sync,
        name=replication_name,
        replicate_existing_snaps=replicate_existing_snaps,
        remote_system=remote_system)
[ "def", "replicate", "(", "self", ",", "dst_lun_id", ",", "max_time_out_of_sync", ",", "replication_name", "=", "None", ",", "replicate_existing_snaps", "=", "None", ",", "remote_system", "=", "None", ")", ":", "return", "UnityReplicationSession", ".", "create", "(...
Creates a replication session with a existing lun as destination. :param dst_lun_id: destination lun id. :param max_time_out_of_sync: maximum time to wait before syncing the source and destination. Value `-1` means the automatic sync is not performed. `0` means it is a sync replication. :param replication_name: replication name. :param replicate_existing_snaps: whether to replicate existing snaps. :param remote_system: `UnityRemoteSystem` object. The remote system to which the replication is being configured. When not specified, it defaults to local system. :return: created replication session.
[ "Creates", "a", "replication", "session", "with", "a", "existing", "lun", "as", "destination", "." ]
python
train
49.217391
tornadoweb/tornado
tornado/auth.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/auth.py#L712-L732
async def authenticate_redirect(self, callback_uri: str = None) -> None:
    """Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if
    authorized.

    This is generally the right interface to use if you are using
    Twitter for single-sign on.

    .. versionchanged:: 3.1
       Now returns a `.Future` and takes an optional callback, for
       compatibility with `.gen.coroutine`.

    .. versionchanged:: 6.0

       The ``callback`` argument was removed. Use the returned
       awaitable object instead.
    """
    request_token_url = self._oauth_request_token_url(callback_uri=callback_uri)
    client = self.get_auth_http_client()
    response = await client.fetch(request_token_url)
    self._on_request_token(self._OAUTH_AUTHENTICATE_URL, None, response)
[ "async", "def", "authenticate_redirect", "(", "self", ",", "callback_uri", ":", "str", "=", "None", ")", "->", "None", ":", "http", "=", "self", ".", "get_auth_http_client", "(", ")", "response", "=", "await", "http", ".", "fetch", "(", "self", ".", "_oa...
Just like `~OAuthMixin.authorize_redirect`, but auto-redirects if authorized. This is generally the right interface to use if you are using Twitter for single-sign on. .. versionchanged:: 3.1 Now returns a `.Future` and takes an optional callback, for compatibility with `.gen.coroutine`. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned awaitable object instead.
[ "Just", "like", "~OAuthMixin", ".", "authorize_redirect", "but", "auto", "-", "redirects", "if", "authorized", "." ]
python
train
37.809524
elastic/apm-agent-python
elasticapm/utils/compat.py
https://github.com/elastic/apm-agent-python/blob/2975663d7bd22282dc39336b2c37b37c12c7a774/elasticapm/utils/compat.py#L162-L169
def multidict_to_dict(d):
    """
    Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with
    list values
    :param d: a MultiDict or MultiValueDict instance
    :return: a dict instance
    """
    result = {}
    for key, values in iterlists(d):
        # Single-valued keys are unwrapped; multi-valued keys keep the list.
        result[key] = values[0] if len(values) == 1 else values
    return result
[ "def", "multidict_to_dict", "(", "d", ")", ":", "return", "dict", "(", "(", "k", ",", "v", "[", "0", "]", "if", "len", "(", "v", ")", "==", "1", "else", "v", ")", "for", "k", ",", "v", "in", "iterlists", "(", "d", ")", ")" ]
Turns a werkzeug.MultiDict or django.MultiValueDict into a dict with list values :param d: a MultiDict or MultiValueDict instance :return: a dict instance
[ "Turns", "a", "werkzeug", ".", "MultiDict", "or", "django", ".", "MultiValueDict", "into", "a", "dict", "with", "list", "values", ":", "param", "d", ":", "a", "MultiDict", "or", "MultiValueDict", "instance", ":", "return", ":", "a", "dict", "instance" ]
python
train
34.875
markovmodel/PyEMMA
pyemma/thermo/api.py
https://github.com/markovmodel/PyEMMA/blob/5c3124398217de05ba5ce9c8fb01519222481ab8/pyemma/thermo/api.py#L42-L280
def estimate_umbrella_sampling( us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=None, md_dtrajs=None, kT=None, maxiter=10000, maxerr=1.0E-15, save_convergence_info=0, estimator='wham', lag=1, dt_traj='1 step', init=None, init_maxiter=10000, init_maxerr=1.0E-8, width=None, **kwargs): r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. 
kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. 
width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. **kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or 
:class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes: """ from .util import get_umbrella_sampling_data as _get_umbrella_sampling_data # sanity checks if estimator not in ['wham', 'mbar', 'dtram', 'tram']: raise ValueError("unsupported estimator: %s" % estimator) if not isinstance(us_trajs, (list, tuple)): raise ValueError("The parameter us_trajs must be a list of numpy.ndarray objects") if not isinstance(us_centers, (list, tuple)): raise ValueError( "The parameter us_centers must be a list of floats or numpy.ndarray objects") if not isinstance(us_force_constants, (list, tuple)): raise ValueError( "The parameter us_force_constants must be a list of floats or numpy.ndarray objects") if len(us_trajs) != len(us_centers): raise ValueError("Unmatching number of umbrella sampling trajectories and centers: %d!=%d" \ % (len(us_trajs), len(us_centers))) if len(us_trajs) != len(us_force_constants): raise ValueError( "Unmatching number of umbrella sampling trajectories and force constants: %d!=%d" \ % (len(us_trajs), len(us_force_constants))) if len(us_trajs) != len(us_dtrajs): raise ValueError( "Number of continuous and discrete umbrella sampling trajectories does not " + \ "match: %d!=%d" % (len(us_trajs), len(us_dtrajs))) i = 0 for traj, dtraj in zip(us_trajs, us_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete umbrella sampling trajectories with " + \ "index %d does not match: 
%d!=%d" % (i, len(us_trajs), len(us_dtrajs))) i += 1 if md_trajs is not None: if not isinstance(md_trajs, (list, tuple)): raise ValueError("The parameter md_trajs must be a list of numpy.ndarray objects") if md_dtrajs is None: raise ValueError("You have provided md_trajs, but md_dtrajs is None") if md_dtrajs is None: md_dtrajs = [] else: if md_trajs is None: raise ValueError("You have provided md_dtrajs, but md_trajs is None") if len(md_trajs) != len(md_dtrajs): raise ValueError( "Number of continuous and discrete unbiased trajectories does not " + \ "match: %d!=%d" % (len(md_trajs), len(md_dtrajs))) i = 0 for traj, dtraj in zip(md_trajs, md_dtrajs): if traj.shape[0] != dtraj.shape[0]: raise ValueError( "Lengths of continuous and discrete unbiased trajectories with " + \ "index %d does not match: %d!=%d" % (i, len(md_trajs), len(md_dtrajs))) i += 1 # data preparation ttrajs, btrajs, umbrella_centers, force_constants, unbiased_state = _get_umbrella_sampling_data( us_trajs, us_centers, us_force_constants, md_trajs=md_trajs, kT=kT, width=width) estimator_obj = None # estimation if estimator == 'wham': estimator_obj = wham( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj) elif estimator == 'mbar': allowed_keys = ['direct_space'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = mbar( ttrajs, us_dtrajs + md_dtrajs, btrajs, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, **parsed_kwargs) elif estimator == 'dtram': allowed_keys = ['count_mode', 'connectivity'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = dtram( ttrajs, us_dtrajs + md_dtrajs, _get_averaged_bias_matrix(btrajs, us_dtrajs + md_dtrajs), lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, 
save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) elif estimator == 'tram': allowed_keys = [ 'count_mode', 'connectivity', 'connectivity_factor','nn', 'direct_space', 'N_dtram_accelerations', 'equilibrium', 'overcounting_factor', 'callback'] parsed_kwargs = dict([(i, kwargs[i]) for i in allowed_keys if i in kwargs]) estimator_obj = tram( ttrajs, us_dtrajs + md_dtrajs, btrajs, lag, unbiased_state=unbiased_state, maxiter=maxiter, maxerr=maxerr, save_convergence_info=save_convergence_info, dt_traj=dt_traj, init=init, init_maxiter=init_maxiter, init_maxerr=init_maxerr, **parsed_kwargs) # adding thermodynamic state information and return results try: estimator_obj.umbrella_centers = umbrella_centers estimator_obj.force_constants = force_constants except AttributeError: for obj in estimator_obj: obj.umbrella_centers = umbrella_centers obj.force_constants = force_constants return estimator_obj
[ "def", "estimate_umbrella_sampling", "(", "us_trajs", ",", "us_dtrajs", ",", "us_centers", ",", "us_force_constants", ",", "md_trajs", "=", "None", ",", "md_dtrajs", "=", "None", ",", "kT", "=", "None", ",", "maxiter", "=", "10000", ",", "maxerr", "=", "1.0E...
r""" This function acts as a wrapper for ``tram()``, ``dtram()``, ``mbar()``, and ``wham()`` and handles the calculation of bias energies (``bias``) and thermodynamic state trajectories (``ttrajs``) when the data comes from umbrella sampling and (optional) unbiased simulations. Parameters ---------- us_trajs : list of N arrays, each of shape (T_i, d) List of arrays, each having T_i rows, one for each time step, and d columns where d is the dimensionality of the subspace in which umbrella sampling was applied. Often d=1, and thus us_trajs will be a list of 1d-arrays. us_dtrajs : list of N int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the umbrella sampling trajectory is in at any time. us_centers : list of N floats or d-dimensional arrays of floats List or array of N center positions. Each position must be a d-dimensional vector. For 1d umbrella sampling, one can simply pass a list of centers, e.g. [-5.0, -4.0, -3.0, ... ]. us_force_constants : list of N floats or d- or dxd-dimensional arrays of floats The force constants used in the umbrellas, unit-less (e.g. kT per squared length unit). For multidimensional umbrella sampling, the force matrix must be used. md_trajs : list of M arrays, each of shape (T_i, d), optional, default=None Unbiased molecular dynamics simulations; format like us_trajs. md_dtrajs : list of M int arrays, each of shape (T_i,) The integers are indexes in 0,...,n-1 enumerating the n discrete states or the bins the unbiased trajectory is in at any time. kT : float or None, optional, default=None Use this attribute if the supplied force constants are NOT unit-less; kT must have the same energy unit as the force constants. maxiter : int, optional, default=10000 The maximum number of self-consistent iterations before the estimator exits unsuccessfully. 
maxerr : float, optional, default=1.0E-15 Convergence criterion based on the maximal free energy change in a self-consistent iteration step. save_convergence_info : int, optional, default=0 Every save_convergence_info iteration steps, store the actual increment and the actual loglikelihood; 0 means no storage. estimator : str, optional, default='wham' Specify one of the available estimators | 'wham': use WHAM | 'mbar': use MBAR | 'dtram': use the discrete version of TRAM | 'tram': use TRAM lag : int or list of int, optional, default=1 Integer lag time at which transitions are counted. Providing a list of lag times will trigger one estimation per lag time. dt_traj : str, optional, default='1 step' Description of the physical time corresponding to the lag. May be used by analysis algorithms such as plotting tools to pretty-print the axes. By default '1 step', i.e. there is no physical time unit. Specify by a number, whitespace and unit. Permitted units are (* is an arbitrary string): | 'fs', 'femtosecond*' | 'ps', 'picosecond*' | 'ns', 'nanosecond*' | 'us', 'microsecond*' | 'ms', 'millisecond*' | 's', 'second*' init : str, optional, default=None Use a specific initialization for the self-consistent iteration: | None: use a hard-coded guess for free energies and Lagrangian multipliers | 'wham': perform a short WHAM estimate to initialize the free energies (only with dtram) | 'mbar': perform a short MBAR estimate to initialize the free energies (only with tram) init_maxiter : int, optional, default=10000 The maximum number of self-consistent iterations during the initialization. init_maxerr : float, optional, default=1.0E-8 Convergence criterion for the initialization. width : array-like of float, optional, default=None Specify periodicity for individual us_traj dimensions. Each positive entry will make the corresponding feature periodic and use the given value as width. None/zero values will be treated as non-periodic. 
**kwargs : dict, optional You can use this to pass estimator-specific named parameters to the chosen estimator, which are not already coverd by ``estimate_umbrella_sampling()``. Returns ------- A :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` object or list thereof The requested estimator/model object, i.e., WHAM, MBAR, DTRAM or TRAM. If multiple lag times are given, a list of objects is returned (one MEMM per lag time). Example ------- We look at a 1D umbrella sampling simulation with two umbrellas at 1.1 and 1.3 on the reaction coordinate with spring constant of 1.0; additionally, we have two unbiased simulations. We start with a joint clustering and use TRAM for the estimation: >>> from pyemma.coordinates import cluster_regspace as regspace >>> from pyemma.thermo import estimate_umbrella_sampling as estimate_us >>> import numpy as np >>> us_centers = [1.1, 1.3] >>> us_force_constants = [1.0, 1.0] >>> us_trajs = [np.array([1.0, 1.1, 1.2, 1.1, 1.0, 1.1]).reshape((-1, 1)), np.array([1.3, 1.2, 1.3, 1.4, 1.4, 1.3]).reshape((-1, 1))] >>> md_trajs = [np.array([0.9, 1.0, 1.1, 1.2, 1.3, 1.4]).reshape((-1, 1)), np.array([1.5, 1.4, 1.3, 1.4, 1.4, 1.5]).reshape((-1, 1))] >>> cluster = regspace(data=us_trajs+md_trajs, max_centers=10, dmin=0.15) >>> us_dtrajs = cluster.dtrajs[:2] >>> md_dtrajs = cluster.dtrajs[2:] >>> centers = cluster.clustercenters >>> tram = estimate_us(us_trajs, us_dtrajs, us_centers, us_force_constants, md_trajs=md_trajs, md_dtrajs=md_dtrajs, estimator='tram', lag=1) >>> tram.f # doctest: +ELLIPSIS array([ 0.63..., 1.60..., 1.31...]) See :class:`MultiThermModel <pyemma.thermo.models.multi_therm.MultiThermModel>` or :class:`MEMM <pyemma.thermo.models.memm.MEMM>` for a full documentation. .. autoclass:: pyemma.thermo.models.multi_therm.MultiThermModel :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :methods: .. 
rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.multi_therm.MultiThermModel :attributes: .. autoclass:: pyemma.thermo.models.memm.MEMM :members: :undoc-members: .. rubric:: Methods .. autoautosummary:: pyemma.thermo.models.memm.MEMM :methods: .. rubric:: Attributes .. autoautosummary:: pyemma.thermo.models.memm.MEMM :attributes:
[ "r", "This", "function", "acts", "as", "a", "wrapper", "for", "tram", "()", "dtram", "()", "mbar", "()", "and", "wham", "()", "and", "handles", "the", "calculation", "of", "bias", "energies", "(", "bias", ")", "and", "thermodynamic", "state", "trajectories...
python
train
51.317992
kata198/AdvancedHTMLParser
AdvancedHTMLParser/conversions.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/conversions.py#L130-L174
def convertPossibleValues(val, possibleValues, invalidDefault, emptyValue=''): ''' convertPossibleValues - Convert input value to one of several possible values, with a default for invalid entries @param val <None/str> - The input value @param possibleValues list<str> - A list of possible values @param invalidDefault <None/str/Exception> - The value to return if "val" is not empty string/None and "val" is not in #possibleValues If instantiated Exception (like ValueError('blah')): Raise this exception If an Exception type ( like ValueError ) - Instantiate and raise this exception type Otherwise, use this raw value @param emptyValue Default '', used for an empty value (empty string or None) ''' from .utils import tostr # If null, retain null if val is None: if emptyValue is EMPTY_IS_INVALID: return _handleInvalid(invalidDefault) return emptyValue # Convert to a string val = tostr(val).lower() # If empty string, same as null if val == '': if emptyValue is EMPTY_IS_INVALID: return _handleInvalid(invalidDefault) return emptyValue # Check if this is a valid value if val not in possibleValues: return _handleInvalid(invalidDefault) return val
[ "def", "convertPossibleValues", "(", "val", ",", "possibleValues", ",", "invalidDefault", ",", "emptyValue", "=", "''", ")", ":", "from", ".", "utils", "import", "tostr", "# If null, retain null", "if", "val", "is", "None", ":", "if", "emptyValue", "is", "EMPT...
convertPossibleValues - Convert input value to one of several possible values, with a default for invalid entries @param val <None/str> - The input value @param possibleValues list<str> - A list of possible values @param invalidDefault <None/str/Exception> - The value to return if "val" is not empty string/None and "val" is not in #possibleValues If instantiated Exception (like ValueError('blah')): Raise this exception If an Exception type ( like ValueError ) - Instantiate and raise this exception type Otherwise, use this raw value @param emptyValue Default '', used for an empty value (empty string or None)
[ "convertPossibleValues", "-", "Convert", "input", "value", "to", "one", "of", "several", "possible", "values", "with", "a", "default", "for", "invalid", "entries" ]
python
train
32.844444
rbarrois/tdparser
tdparser/lexer.py
https://github.com/rbarrois/tdparser/blob/31225aa5149f2ab507ccce15a020fbb37d0acb56/tdparser/lexer.py#L127-L152
def lex(self, text): """Split self.text into a list of tokens. Args: text (str): text to parse Yields: Token: the tokens generated from the given text. """ pos = 0 while text: token_class, match = self.tokens.get_token(text) if token_class is not None: matched_text = text[match.start():match.end()] yield token_class(matched_text) text = text[match.end():] pos += match.end() elif text[0] in self.blank_chars: text = text[1:] pos += 1 else: raise LexerError( 'Invalid character %s in %s' % (text[0], text), position=pos) yield self.end_token()
[ "def", "lex", "(", "self", ",", "text", ")", ":", "pos", "=", "0", "while", "text", ":", "token_class", ",", "match", "=", "self", ".", "tokens", ".", "get_token", "(", "text", ")", "if", "token_class", "is", "not", "None", ":", "matched_text", "=", ...
Split self.text into a list of tokens. Args: text (str): text to parse Yields: Token: the tokens generated from the given text.
[ "Split", "self", ".", "text", "into", "a", "list", "of", "tokens", "." ]
python
train
31.153846
iotile/coretools
iotilecore/iotile/core/hw/update/records/reflash_tile.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/hw/update/records/reflash_tile.py#L165-L188
def _parse_target(target): """Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data """ if len(target) != 8: raise ArgumentError("Invalid targeting data length", expected=8, length=len(target)) slot, match_op = struct.unpack("<B6xB", target) if match_op == _MATCH_CONTROLLER: return {'controller': True, 'slot': 0} elif match_op == _MATCH_SLOT: return {'controller': False, 'slot': slot} raise ArgumentError("Unsupported complex targeting specified", match_op=match_op)
[ "def", "_parse_target", "(", "target", ")", ":", "if", "len", "(", "target", ")", "!=", "8", ":", "raise", "ArgumentError", "(", "\"Invalid targeting data length\"", ",", "expected", "=", "8", ",", "length", "=", "len", "(", "target", ")", ")", "slot", "...
Parse a binary targeting information structure. This function only supports extracting the slot number or controller from the target and will raise an ArgumentError if more complicated targeting is desired. Args: target (bytes): The binary targeting data blob. Returns: dict: The parsed targeting data
[ "Parse", "a", "binary", "targeting", "information", "structure", "." ]
python
train
32.833333
SuryaSankar/flask-sqlalchemy-booster
flask_sqlalchemy_booster/responses.py
https://github.com/SuryaSankar/flask-sqlalchemy-booster/blob/444048d167ab7718f758e943665ef32d101423a5/flask_sqlalchemy_booster/responses.py#L191-L216
def jsoned(struct, wrap=True, meta=None, struct_key='result', pre_render_callback=None): """ Provides a json dump of the struct Args: struct: The data to dump wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict struct_key (str, optional): The string key which will contain the struct in the result dict meta (dict, optional): An optional dictonary to merge with the output dictionary. Examples: >>> jsoned([3,4,5]) ... '{"status": "success", "result": [3, 4, 5]}' >>> jsoned([3,4,5], wrap=False) ... '[3, 4, 5]' """ return _json.dumps( structured( struct, wrap=wrap, meta=meta, struct_key=struct_key, pre_render_callback=pre_render_callback), default=json_encoder)
[ "def", "jsoned", "(", "struct", ",", "wrap", "=", "True", ",", "meta", "=", "None", ",", "struct_key", "=", "'result'", ",", "pre_render_callback", "=", "None", ")", ":", "return", "_json", ".", "dumps", "(", "structured", "(", "struct", ",", "wrap", "...
Provides a json dump of the struct Args: struct: The data to dump wrap (bool, optional): Specify whether to wrap the struct in an enclosing dict struct_key (str, optional): The string key which will contain the struct in the result dict meta (dict, optional): An optional dictonary to merge with the output dictionary. Examples: >>> jsoned([3,4,5]) ... '{"status": "success", "result": [3, 4, 5]}' >>> jsoned([3,4,5], wrap=False) ... '[3, 4, 5]'
[ "Provides", "a", "json", "dump", "of", "the", "struct" ]
python
train
31.961538