repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
Valuehorizon/valuehorizon-people
people/models.py
https://github.com/Valuehorizon/valuehorizon-people/blob/f32d9f1349c1a9384bae5ea61d10c1b1e0318401/people/models.py#L102-L115
def save(self, *args, **kwargs):
    """Persist the person, normalising fields first.

    If a date of death is specified, ``is_deceased`` is forced to True.
    Name fields are stripped of surrounding whitespace because they are
    often copy-pasted from external sources.
    """
    # Identity test for None (PEP 8) instead of `!= None` equality.
    if self.date_of_death is not None:
        self.is_deceased = True
    # Since we often copy and paste names from strange sources,
    # do some basic cleanup.
    self.first_name = self.first_name.strip()
    self.last_name = self.last_name.strip()
    self.other_names = self.other_names.strip()
    # Delegate to the parent model's save method.
    super(Person, self).save(*args, **kwargs)
[ "def", "save", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "date_of_death", "!=", "None", ":", "self", ".", "is_deceased", "=", "True", "# Since we often copy and paste names from strange sources, do some basic cleanup", "s...
If date of death is specified, set is_deceased to true
[ "If", "date", "of", "death", "is", "specified", "set", "is_deceased", "to", "true" ]
python
train
marcinmiklitz/pywindow
pywindow/utilities.py
https://github.com/marcinmiklitz/pywindow/blob/e5264812157224f22a691741ca2e0aefdc9bd2eb/pywindow/utilities.py#L720-L732
def lattice_array_to_unit_cell(lattice_array):
    """Return crystallographic param. from unit cell lattice matrix."""
    # Cell edge lengths are the column norms of the lattice matrix.
    lengths = np.sqrt(np.sum(lattice_array ** 2, axis=0))
    # Recover the angles (radians) from the upper-triangular entries.
    gamma = np.arccos(lattice_array[0][1] / lengths[1])
    beta = np.arccos(lattice_array[0][2] / lengths[2])
    alpha = np.arccos(
        lattice_array[1][2] * np.sin(gamma) / lengths[2]
        + np.cos(beta) * np.cos(gamma)
    )
    angles = [np.rad2deg(a) for a in (alpha, beta, gamma)]
    # Pack as [a, b, c, alpha, beta, gamma].
    return np.append(lengths, angles)
[ "def", "lattice_array_to_unit_cell", "(", "lattice_array", ")", ":", "cell_lengths", "=", "np", ".", "sqrt", "(", "np", ".", "sum", "(", "lattice_array", "**", "2", ",", "axis", "=", "0", ")", ")", "gamma_r", "=", "np", ".", "arccos", "(", "lattice_array...
Return crystallographic param. from unit cell lattice matrix.
[ "Return", "crystallographic", "param", ".", "from", "unit", "cell", "lattice", "matrix", "." ]
python
train
havardgulldahl/jottalib
src/jottalib/JFS.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L698-L700
def updated(self):
    'return datetime.datetime'
    # Revision timestamps arrive as opaque objects; stringify then parse.
    raw = str(self.f.currentRevision.updated)
    return dateutil.parser.parse(raw)
[ "def", "updated", "(", "self", ")", ":", "return", "dateutil", ".", "parser", ".", "parse", "(", "str", "(", "self", ".", "f", ".", "currentRevision", ".", "updated", ")", ")" ]
return datetime.datetime
[ "return", "datetime", ".", "datetime" ]
python
train
alejandroautalan/pygubu
pygubu/stockimage.py
https://github.com/alejandroautalan/pygubu/blob/41c8fb37ef973736ec5d68cbe1cd4ecb78712e40/pygubu/stockimage.py#L124-L136
def get(cls, rkey):
    """Get image previously registered with key rkey.

    If key not exist, raise StockImageException
    """
    if rkey in cls._cached:
        # Lazy %-style args: the message is only formatted if the
        # record is actually emitted (logging best practice).
        logger.info('Resource %s is in cache.', rkey)
        return cls._cached[rkey]
    if rkey in cls._stock:
        # Load on demand; _load_image is expected to populate the cache.
        return cls._load_image(rkey)
    raise StockImageException('StockImage: %s not registered.' % rkey)
[ "def", "get", "(", "cls", ",", "rkey", ")", ":", "if", "rkey", "in", "cls", ".", "_cached", ":", "logger", ".", "info", "(", "'Resource %s is in cache.'", "%", "rkey", ")", "return", "cls", ".", "_cached", "[", "rkey", "]", "if", "rkey", "in", "cls",...
Get image previously registered with key rkey. If key not exist, raise StockImageException
[ "Get", "image", "previously", "registered", "with", "key", "rkey", ".", "If", "key", "not", "exist", "raise", "StockImageException" ]
python
train
wummel/linkchecker
linkcheck/lc_cgi.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/lc_cgi.py#L105-L111
def write(self, data):
    """Write given unicode data to buffer."""
    # Only unicode text is accepted (Python 2 codebase).
    assert isinstance(data, unicode)
    if self.closed:
        raise IOError("Write on closed I/O object")
    if not data:
        # Nothing to store for the empty string.
        return
    self.buf.append(data)
[ "def", "write", "(", "self", ",", "data", ")", ":", "assert", "isinstance", "(", "data", ",", "unicode", ")", "if", "self", ".", "closed", ":", "raise", "IOError", "(", "\"Write on closed I/O object\"", ")", "if", "data", ":", "self", ".", "buf", ".", ...
Write given unicode data to buffer.
[ "Write", "given", "unicode", "data", "to", "buffer", "." ]
python
train
invisibleroads/socketIO-client
socketIO_client/namespaces.py
https://github.com/invisibleroads/socketIO-client/blob/1e58adda9397500d89b4521c90aa06e6a511cef6/socketIO_client/namespaces.py#L22-L25
def once(self, event, callback):
    """Register *callback* to handle only the first *event* emitted by the
    server."""
    # Mark the event as one-shot first, then register it normally.
    self._once_events.add(event)
    self.on(event, callback)
[ "def", "once", "(", "self", ",", "event", ",", "callback", ")", ":", "self", ".", "_once_events", ".", "add", "(", "event", ")", "self", ".", "on", "(", "event", ",", "callback", ")" ]
Define a callback to handle the first event emitted by the server
[ "Define", "a", "callback", "to", "handle", "the", "first", "event", "emitted", "by", "the", "server" ]
python
train
getsentry/rb
rb/ketama.py
https://github.com/getsentry/rb/blob/569d1d13311f6c04bae537fc17e75da430e4ec45/rb/ketama.py#L45-L58
def _get_node_pos(self, key):
    """Return node position(integer) for a given key or None."""
    if not self._hashring:
        return
    digest = md5_bytes(key)
    # Fold the first four digest bytes into a little-endian integer key.
    hashed = (digest[3] << 24) | (digest[2] << 16) | (digest[1] << 8) | digest[0]
    pos = bisect(self._sorted_keys, hashed)
    # Wrap around to the start of the ring when past the last key.
    return 0 if pos == len(self._sorted_keys) else pos
[ "def", "_get_node_pos", "(", "self", ",", "key", ")", ":", "if", "not", "self", ".", "_hashring", ":", "return", "k", "=", "md5_bytes", "(", "key", ")", "key", "=", "(", "k", "[", "3", "]", "<<", "24", ")", "|", "(", "k", "[", "2", "]", "<<",...
Return node position(integer) for a given key or None.
[ "Return", "node", "position", "(", "integer", ")", "for", "a", "given", "key", "or", "None", "." ]
python
train
baverman/supplement
supplement/remote.py
https://github.com/baverman/supplement/blob/955002fe5a5749c9f0d89002f0006ec4fcd35bc9/supplement/remote.py#L114-L123
def assist(self, project_path, source, position, filename):
    """Return completion match and list of completion proposals

    :param project_path: absolute project path
    :param source: unicode or byte string code source
    :param position: character or byte cursor position
    :param filename: absolute path of file with source code
    :returns: tuple (completion match, sorted list of proposals)
    """
    # Thin RPC wrapper: every argument is forwarded verbatim.
    return self._call(
        'assist', project_path, source, position, filename)
[ "def", "assist", "(", "self", ",", "project_path", ",", "source", ",", "position", ",", "filename", ")", ":", "return", "self", ".", "_call", "(", "'assist'", ",", "project_path", ",", "source", ",", "position", ",", "filename", ")" ]
Return completion match and list of completion proposals :param project_path: absolute project path :param source: unicode or byte string code source :param position: character or byte cursor position :param filename: absolute path of file with source code :returns: tuple (completion match, sorted list of proposals)
[ "Return", "completion", "match", "and", "list", "of", "completion", "proposals" ]
python
train
upsight/doctor
doctor/parsers.py
https://github.com/upsight/doctor/blob/2cf1d433f6f1aa1355644b449a757c0660793cdd/doctor/parsers.py#L213-L268
def parse_form_and_query_params(req_params: dict, sig_params: dict) -> dict: """Uses the parameter annotations to coerce string params. This is used for HTTP requests, in which the form parameters are all strings, but need to be converted to the appropriate types before validating them. :param dict req_params: The parameters specified in the request. :param dict sig_params: The logic function's signature parameters. :returns: a dict of params parsed from the input dict. :raises TypeSystemError: If there are errors parsing values. """ # Importing here to prevent circular dependencies. from doctor.types import SuperType, UnionType errors = {} parsed_params = {} for param, value in req_params.items(): # Skip request variables not in the function signature. if param not in sig_params: continue # Skip coercing parameters not annotated by a doctor type. if not issubclass(sig_params[param].annotation, SuperType): continue # Check if the type has a custom parser for the parameter. custom_parser = sig_params[param].annotation.parser if custom_parser is not None: if not callable(custom_parser): warnings.warn( 'Parser `{}` is not callable, using default parser.'.format( custom_parser)) custom_parser = None try: if custom_parser is not None: parsed_params[param] = custom_parser(value) else: if issubclass(sig_params[param].annotation, UnionType): json_type = [ _native_type_to_json[_type.native_type] for _type in sig_params[param].annotation.types ] else: native_type = sig_params[param].annotation.native_type json_type = [_native_type_to_json[native_type]] # If the type is nullable, also add null as an allowed type. if sig_params[param].annotation.nullable: json_type.append('null') _, parsed_params[param] = parse_value(value, json_type) except ParseError as e: errors[param] = str(e) if errors: raise TypeSystemError(errors, errors=errors) return parsed_params
[ "def", "parse_form_and_query_params", "(", "req_params", ":", "dict", ",", "sig_params", ":", "dict", ")", "->", "dict", ":", "# Importing here to prevent circular dependencies.", "from", "doctor", ".", "types", "import", "SuperType", ",", "UnionType", "errors", "=", ...
Uses the parameter annotations to coerce string params. This is used for HTTP requests, in which the form parameters are all strings, but need to be converted to the appropriate types before validating them. :param dict req_params: The parameters specified in the request. :param dict sig_params: The logic function's signature parameters. :returns: a dict of params parsed from the input dict. :raises TypeSystemError: If there are errors parsing values.
[ "Uses", "the", "parameter", "annotations", "to", "coerce", "string", "params", "." ]
python
train
yjzhang/uncurl_python
uncurl/ensemble.py
https://github.com/yjzhang/uncurl_python/blob/55c58ca5670f87699d3bd5752fdfa4baa07724dd/uncurl/ensemble.py#L150-L177
def nmf_tsne(data, k, n_runs=10, init='enhanced', **params):
    """
    runs tsne-consensus-NMF

    1. run a bunch of NMFs, get W and H
    2. run tsne + km on all WH matrices
    3. run consensus clustering on all km results
    4. use consensus clustering as initialization for a new run of NMF
    5. return the W and H from the resulting NMF run
    """
    nmf = NMF(k)
    tsne = TSNE(2)
    km = KMeans(k)
    # One k-means labelling per NMF run, collected for the ensemble.
    all_labels = []
    for _ in range(n_runs):
        w = nmf.fit_transform(data)
        h = nmf.components_
        embedded = tsne.fit_transform(w.dot(h).T)
        all_labels.append(km.fit_predict(embedded))
    # Consensus clustering over the stacked labellings.
    consensus = CE.cluster_ensembles(np.vstack(all_labels), verbose=False,
                                     N_clusters_max=k)
    # TODO: find an initialization for the consensus W and H
    init_w, init_h = nmf_init(data, consensus, k, init)
    nmf_final = NMF(k, init='custom')
    W = nmf_final.fit_transform(data, W=init_w, H=init_h)
    H = nmf_final.components_
    return W, H
[ "def", "nmf_tsne", "(", "data", ",", "k", ",", "n_runs", "=", "10", ",", "init", "=", "'enhanced'", ",", "*", "*", "params", ")", ":", "clusters", "=", "[", "]", "nmf", "=", "NMF", "(", "k", ")", "tsne", "=", "TSNE", "(", "2", ")", "km", "=",...
runs tsne-consensus-NMF 1. run a bunch of NMFs, get W and H 2. run tsne + km on all WH matrices 3. run consensus clustering on all km results 4. use consensus clustering as initialization for a new run of NMF 5. return the W and H from the resulting NMF run
[ "runs", "tsne", "-", "consensus", "-", "NMF" ]
python
train
aestrivex/bctpy
bct/algorithms/centrality.py
https://github.com/aestrivex/bctpy/blob/4cb0e759eb4a038750b07e23bd29958c400684b8/bct/algorithms/centrality.py#L183-L248
def edge_betweenness_bin(G):
    '''
    Edge betweenness centrality is the fraction of all shortest paths in
    the network that contain a given edge. Edges with high values of
    betweenness centrality participate in a large number of shortest
    paths.

    Parameters
    ----------
    A : NxN np.ndarray
        binary directed/undirected connection matrix

    Returns
    -------
    EBC : NxN np.ndarray
        edge betweenness centrality matrix
    BC : Nx1 np.ndarray
        node betweenness centrality vector

    Notes
    -----
    Betweenness centrality may be normalised to the range [0,1] as
    BC/[(N-1)(N-2)], where N is the number of nodes in the network.
    '''
    n = len(G)
    BC = np.zeros((n,))  # vertex betweenness
    EBC = np.zeros((n, n))  # edge betweenness
    # Brandes-style accumulation: a breadth-first sweep from each source
    # vertex u, followed by dependency back-propagation.
    for u in range(n):
        D = np.zeros((n,))
        D[u] = 1  # distance from u
        NP = np.zeros((n,))
        NP[u] = 1  # number of paths from u
        P = np.zeros((n, n))  # predecessors
        Q = np.zeros((n,), dtype=int)  # indices
        q = n - 1  # order of non-increasing distance
        Gu = G.copy()
        V = np.array([u])
        while V.size:
            Gu[:, V] = 0  # remove remaining in-edges
            for v in V:
                Q[q] = v
                q -= 1
                W, = np.where(Gu[v, :])  # neighbors of V
                for w in W:
                    if D[w]:
                        NP[w] += NP[v]  # NP(u->w) sum of old and new
                        P[w, v] = 1  # v is a predecessor
                    else:
                        D[w] = 1
                        NP[w] = NP[v]  # NP(u->v) = NP of new path
                        P[w, v] = 1  # v is a predecessor
            # Next BFS frontier: vertices reachable from the current one.
            V, = np.where(np.any(Gu[V, :], axis=0))
        if np.any(np.logical_not(D)):  # if some vertices unreachable
            Q[:q], = np.where(np.logical_not(D))  # ...these are first in line
        DP = np.zeros((n,))  # dependency
        # Back-propagate dependencies in order of non-increasing distance,
        # splitting each vertex's dependency across its predecessor edges.
        for w in Q[:n - 1]:
            BC[w] += DP[w]
            for v in np.where(P[w, :])[0]:
                DPvw = (1 + DP[w]) * NP[v] / NP[w]
                DP[v] += DPvw
                EBC[v, w] += DPvw
    return EBC, BC
[ "def", "edge_betweenness_bin", "(", "G", ")", ":", "n", "=", "len", "(", "G", ")", "BC", "=", "np", ".", "zeros", "(", "(", "n", ",", ")", ")", "# vertex betweenness", "EBC", "=", "np", ".", "zeros", "(", "(", "n", ",", "n", ")", ")", "# edge b...
Edge betweenness centrality is the fraction of all shortest paths in the network that contain a given edge. Edges with high values of betweenness centrality participate in a large number of shortest paths. Parameters ---------- A : NxN np.ndarray binary directed/undirected connection matrix Returns ------- EBC : NxN np.ndarray edge betweenness centrality matrix BC : Nx1 np.ndarray node betweenness centrality vector Notes ----- Betweenness centrality may be normalised to the range [0,1] as BC/[(N-1)(N-2)], where N is the number of nodes in the network.
[ "Edge", "betweenness", "centrality", "is", "the", "fraction", "of", "all", "shortest", "paths", "in", "the", "network", "that", "contain", "a", "given", "edge", ".", "Edges", "with", "high", "values", "of", "betweenness", "centrality", "participate", "in", "a"...
python
train
inasafe/inasafe
safe/gui/tools/multi_buffer_dialog.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/gui/tools/multi_buffer_dialog.py#L178-L185
def get_output_from_input(self):
    """Populate output form with default output path based on input layer.
    """
    source_path = self.layer.currentLayer().source()
    root, extension = os.path.splitext(source_path)
    # Build a sibling default output path by suffixing the base name.
    self.output_form.setText(root + '_multi_buffer' + extension)
[ "def", "get_output_from_input", "(", "self", ")", ":", "input_path", "=", "self", ".", "layer", ".", "currentLayer", "(", ")", ".", "source", "(", ")", "output_path", "=", "(", "os", ".", "path", ".", "splitext", "(", "input_path", ")", "[", "0", "]", ...
Populate output form with default output path based on input layer.
[ "Populate", "output", "form", "with", "default", "output", "path", "based", "on", "input", "layer", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/utils/text.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/utils/text.py#L275-L300
def qw(words, flat=0, sep=None, maxsplit=-1):
    """Similar to Perl's qw() operator, but with some more options.

    qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit)

    words can also be a list itself, and with flat=1, the output will be
    recursively flattened.

    Examples:

    >>> qw('1 2')
    ['1', '2']

    >>> qw(['a b','1 2',['m n','p q']])
    [['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]]

    >>> qw(['a b','1 2',['m n','p q']],flat=1)
    ['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
    """
    if isinstance(words, basestring):
        # Split, then discard empty or whitespace-only fragments.
        return [fragment.strip()
                for fragment in words.split(sep, maxsplit)
                if fragment and not fragment.isspace()]
    # words is a sequence: recurse into each element (Python 2 map
    # returns a list here).
    if flat:
        return flatten(map(qw, words, [1] * len(words)))
    return map(qw, words)
[ "def", "qw", "(", "words", ",", "flat", "=", "0", ",", "sep", "=", "None", ",", "maxsplit", "=", "-", "1", ")", ":", "if", "isinstance", "(", "words", ",", "basestring", ")", ":", "return", "[", "word", ".", "strip", "(", ")", "for", "word", "i...
Similar to Perl's qw() operator, but with some more options. qw(words,flat=0,sep=' ',maxsplit=-1) -> words.split(sep,maxsplit) words can also be a list itself, and with flat=1, the output will be recursively flattened. Examples: >>> qw('1 2') ['1', '2'] >>> qw(['a b','1 2',['m n','p q']]) [['a', 'b'], ['1', '2'], [['m', 'n'], ['p', 'q']]] >>> qw(['a b','1 2',['m n','p q']],flat=1) ['a', 'b', '1', '2', 'm', 'n', 'p', 'q']
[ "Similar", "to", "Perl", "s", "qw", "()", "operator", "but", "with", "some", "more", "options", "." ]
python
test
brocade/pynos
pynos/versions/base/yang/ietf_netconf.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/ietf_netconf.py#L42-L55
def get_config_input_source_config_source_startup_startup(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    get_config = ET.Element("get_config")
    # The generated request replaces the initial config element wholesale.
    config = get_config
    # Build the nested request path one element at a time.
    node = get_config
    for tag in ("input", "source", "config-source", "startup", "startup"):
        node = ET.SubElement(node, tag)
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "get_config_input_source_config_source_startup_startup", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_config", "=", "ET", ".", "Element", "(", "\"get_config\"", ")", "config", "=", "ge...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
MycroftAI/adapt
adapt/engine.py
https://github.com/MycroftAI/adapt/blob/334f23248b8e09fb9d84a88398424ec5bd3bae4c/adapt/engine.py#L258-L270
def _regex_strings(self):
    """
    A property to link into IntentEngine's _regex_strings.

    Warning: this is only for backwards compatibility and should not be
    used if you intend on using domains.

    Returns: the domains _regex_strings from its IntentEngine
    """
    default_domain = 0
    # Lazily create the default domain on first access.
    if default_domain not in self.domains:
        self.register_domain(domain=default_domain)
    return self.domains[default_domain]._regex_strings
[ "def", "_regex_strings", "(", "self", ")", ":", "domain", "=", "0", "if", "domain", "not", "in", "self", ".", "domains", ":", "self", ".", "register_domain", "(", "domain", "=", "domain", ")", "return", "self", ".", "domains", "[", "domain", "]", ".", ...
A property to link into IntentEngine's _regex_strings. Warning: this is only for backwards compatibility and should not be used if you intend on using domains. Returns: the domains _regex_strings from its IntentEngine
[ "A", "property", "to", "link", "into", "IntentEngine", "s", "_regex_strings", "." ]
python
train
ttroy50/pyephember
pyephember/pyephember.py
https://github.com/ttroy50/pyephember/blob/3ee159ee82b926b957dae8dcbc7a4bfb6807a9b4/pyephember/pyephember.py#L77-L109
def _login(self):
    """
    Login using username / password and get the first auth token
    """
    url = self.api_base_url + "account/directlogin"
    headers = {
        "Content-Type": "application/x-www-form-urlencoded",
        "Accept": "application/json"
    }
    payload = {'Email': self.username,
               'Password': self.password,
               'RememberMe': 'True'}
    response = requests.post(url, data=payload, headers=headers, timeout=10)
    if response.status_code != 200:
        return False

    self.login_data = response.json()
    if not self.login_data['isSuccess']:
        # Authentication rejected; discard the partial payload.
        self.login_data = None
        return False

    has_token = ('token' in self.login_data
                 and 'accessToken' in self.login_data['token'])
    if has_token:
        self.home_id = self.login_data['token']['currentHomeId']
        self.user_id = self.login_data['token']['userId']
        return True

    # Token missing or malformed; treat as a failed login.
    self.login_data = None
    return False
[ "def", "_login", "(", "self", ")", ":", "headers", "=", "{", "\"Content-Type\"", ":", "\"application/x-www-form-urlencoded\"", ",", "\"Accept\"", ":", "\"application/json\"", "}", "url", "=", "self", ".", "api_base_url", "+", "\"account/directlogin\"", "data", "=", ...
Login using username / password and get the first auth token
[ "Login", "using", "username", "/", "password", "and", "get", "the", "first", "auth", "token" ]
python
train
johnwlockwood/stream_tap
stream_tap/__init__.py
https://github.com/johnwlockwood/stream_tap/blob/068f6427c39202991a1db2be842b0fa43c6c5b91/stream_tap/__init__.py#L40-L54
def stream_tap(callables, stream):
    """
    Calls each callable with each item in the stream.

    Use with Buckets. Make a Bucket with a callable and then pass a tuple
    of those buckets in as the callables. After iterating over this
    generator, get contents from each Spigot.

    :param callables: collection of callable.
    :param stream: Iterator of values.
    """
    for element in stream:
        # Fan the element out to every tap before yielding it onward.
        for tap in callables:
            tap(element)
        yield element
[ "def", "stream_tap", "(", "callables", ",", "stream", ")", ":", "for", "item", "in", "stream", ":", "for", "caller", "in", "callables", ":", "caller", "(", "item", ")", "yield", "item" ]
Calls each callable with each item in the stream. Use with Buckets. Make a Bucket with a callable and then pass a tuple of those buckets in as the callables. After iterating over this generator, get contents from each Spigot. :param callables: collection of callable. :param stream: Iterator of values.
[ "Calls", "each", "callable", "with", "each", "item", "in", "the", "stream", ".", "Use", "with", "Buckets", ".", "Make", "a", "Bucket", "with", "a", "callable", "and", "then", "pass", "a", "tuple", "of", "those", "buckets", "in", "as", "the", "callables",...
python
train
peergradeio/flask-mongo-profiler
flask_mongo_profiler/contrib/flask_admin/helpers.py
https://github.com/peergradeio/flask-mongo-profiler/blob/a267eeb49fea07c9a24fb370bd9d7a90ed313ccf/flask_mongo_profiler/contrib/flask_admin/helpers.py#L12-L63
def get_list_url_filtered_by_field_value(view, model, name, reverse=False):
    """Get the URL if a filter of model[name] value was appended.

    This allows programatically adding filters. This is used in the
    specialized case of filtering deeper into a list by a field's value.

    For instance, since there can be multiple assignments in a list of
    handins. The assignment column can have a URL generated by
    get_filter_url to filter the handins to show only ones for that
    assignment.

    Parameters
    ----------
    view : View instance
    model : document (model instance, not the class itself)
    name : field name
    reverse : bool
        Whether to *remove* an applied filter from url

    Returns
    -------
    string : URL of current list args + filtering on field value
    """
    view_args = view._get_list_extra_args()

    def create_filter_arg(field_name, value):
        # NOTE(review): when no '<field_name>_equals' filter is registered,
        # next() yields the default None and the tuple unpack below raises
        # TypeError — confirm the equals-filter is always registered.
        i, flt = next(
            (
                v
                for k, v in view._filter_args.items()
                if k == '{}_equals'.format(field_name)
            ),
            None,
        )
        return (i, flt.name, value)

    new_filter = create_filter_arg(name, model[name])
    filters = view_args.filters
    if new_filter in view_args.filters:  # Filter already applied
        if not reverse:
            # Nothing to change; caller gets None, not a URL.
            return None
        else:  # Remove filter
            filters.remove(new_filter)
    if not reverse:  # Add Filter
        filters.append(new_filter)
    # Example of an activated filter: (u'view_args.filters', [(7, u'Path', u'course')])
    return view._get_list_url(
        view_args.clone(filters=filters, page=0)  # Reset page to 0
    )
[ "def", "get_list_url_filtered_by_field_value", "(", "view", ",", "model", ",", "name", ",", "reverse", "=", "False", ")", ":", "view_args", "=", "view", ".", "_get_list_extra_args", "(", ")", "def", "create_filter_arg", "(", "field_name", ",", "value", ")", ":...
Get the URL if a filter of model[name] value was appended. This allows programatically adding filters. This is used in the specialized case of filtering deeper into a list by a field's value. For instance, since there can be multiple assignments in a list of handins. The assignment column can have a URL generated by get_filter_url to filter the handins to show only ones for that assignment. Parameters ---------- view : View instance model : document (model instance, not the class itself) name : field name reverse : bool Whether to *remove* an applied filter from url Returns ------- string : URL of current list args + filtering on field value
[ "Get", "the", "URL", "if", "a", "filter", "of", "model", "[", "name", "]", "value", "was", "appended", "." ]
python
train
NiklasRosenstein-Python/nr-deprecated
nr/strex.py
https://github.com/NiklasRosenstein-Python/nr-deprecated/blob/f9f8b89ea1b084841a8ab65784eaf68852686b2a/nr/strex.py#L218-L242
def match(self, regex, flags=0):
    """
    Matches the specified *regex* from the current character of the
    *scanner* and returns the result. The Scanners column and line
    numbers are updated respectively.

    # Arguments
    regex (str, Pattern): The regex to match.
    flags (int): The flags to use when compiling the pattern.
    """
    if isinstance(regex, str):
        regex = re.compile(regex, flags)
    result = regex.match(self.text, self.index)
    if result is None:
        return None
    start, end = result.span()
    newlines = self.text.count('\n', start, end)
    self.index = end
    if newlines:
        # Column restarts after the last newline consumed by the match.
        self.colno = end - self.text.rfind('\n', start, end) - 1
        self.lineno += newlines
    else:
        self.colno += end - start
    return result
[ "def", "match", "(", "self", ",", "regex", ",", "flags", "=", "0", ")", ":", "if", "isinstance", "(", "regex", ",", "str", ")", ":", "regex", "=", "re", ".", "compile", "(", "regex", ",", "flags", ")", "match", "=", "regex", ".", "match", "(", ...
Matches the specified *regex* from the current character of the *scanner* and returns the result. The Scanners column and line numbers are updated respectively. # Arguments regex (str, Pattern): The regex to match. flags (int): The flags to use when compiling the pattern.
[ "Matches", "the", "specified", "*", "regex", "*", "from", "the", "current", "character", "of", "the", "*", "scanner", "*", "and", "returns", "the", "result", ".", "The", "Scanners", "column", "and", "line", "numbers", "are", "updated", "respectively", "." ]
python
train
materialsproject/pymatgen
pymatgen/io/abinit/flows.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/abinit/flows.py#L739-L763
def find_deadlocks(self): """ This function detects deadlocks Return: named tuple with the tasks grouped in: deadlocks, runnables, running """ # Find jobs that can be submitted and and the jobs that are already in the queue. runnables = [] for work in self: runnables.extend(work.fetch_alltasks_to_run()) runnables.extend(list(self.iflat_tasks(status=self.S_SUB))) # Running jobs. running = list(self.iflat_tasks(status=self.S_RUN)) # Find deadlocks. err_tasks = self.errored_tasks deadlocked = [] if err_tasks: for task in self.iflat_tasks(): if any(task.depends_on(err_task) for err_task in err_tasks): deadlocked.append(task) return dict2namedtuple(deadlocked=deadlocked, runnables=runnables, running=running)
[ "def", "find_deadlocks", "(", "self", ")", ":", "# Find jobs that can be submitted and and the jobs that are already in the queue.", "runnables", "=", "[", "]", "for", "work", "in", "self", ":", "runnables", ".", "extend", "(", "work", ".", "fetch_alltasks_to_run", "(",...
This function detects deadlocks Return: named tuple with the tasks grouped in: deadlocks, runnables, running
[ "This", "function", "detects", "deadlocks" ]
python
train
djaodjin/djaodjin-deployutils
src/djd.py
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/src/djd.py#L106-L131
def main(args):
    """Main Entry Point.

    Parses the command line, dispatches to the selected subcommand, and
    returns a non-zero exit code on failure (1 when help was printed or
    a RuntimeError carried no explicit code).
    """
    try:
        import __main__
        parser = argparse.ArgumentParser(
            usage='%(prog)s [options] command\n\nVersion\n %(prog)s version '
            + str(__version__),
            formatter_class=argparse.RawTextHelpFormatter)
        parser.add_argument('--version', action='version',
                            version='%(prog)s ' + str(__version__))
        build_subcommands_parser(parser, __main__)

        if len(args) <= 1:
            # No subcommand given: show usage and signal failure.
            parser.print_help()
            return 1

        options = parser.parse_args(args[1:])
        # Filter out options with are not part of the function prototype.
        func_args = filter_subcommand_args(options.func, options)
        options.func(**func_args)
    except RuntimeError as err:
        LOGGER.error(err)
        # BUGFIX: RuntimeError has no `code` attribute, so `err.code`
        # raised AttributeError and masked the real error. Fall back to
        # a generic failure exit code.
        return getattr(err, 'code', 1)
[ "def", "main", "(", "args", ")", ":", "try", ":", "import", "__main__", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "'%(prog)s [options] command\\n\\nVersion\\n %(prog)s version '", "+", "str", "(", "__version__", ")", ",", "formatter_class...
Main Entry Point
[ "Main", "Entry", "Point" ]
python
train
monarch-initiative/dipper
dipper/sources/FlyBase.py
https://github.com/monarch-initiative/dipper/blob/24cc80db355bbe15776edc5c7b41e0886959ba41/dipper/sources/FlyBase.py#L1324-L1389
def _process_cvterm(self):
    """
    CVterms are the internal identifiers for any controlled vocab
    or ontology term. Many are xrefd to actual ontologies. The actual
    external id is stored in the dbxref table, which we place into
    the internal hashmap for lookup with the cvterm id. The name of
    the external term is stored in the "name" element of this table, and
    we add that to the label hashmap for lookup elsewhere

    :return:
    """
    line_counter = 0
    raw = '/'.join((self.rawdir, 'cvterm'))
    LOG.info("processing cvterms")
    with open(raw, 'r') as f:
        f.readline()  # read the header row; skip
        filereader = csv.reader(f, delimiter='\t', quotechar='\"')
        for line in filereader:
            line_counter += 1
            (cvterm_id, cv_id, definition, dbxref_id, is_obsolete,
             is_relationshiptype, name) = line
            # 316 6 1665919 0 0 rRNA_cleavage_snoRNA_primary_transcript
            # 28 5 1663309 0 0 synonym
            # 455 6 1665920 0 0 tmRNA
            # not sure the following is necessary
            # cv_prefixes = {
            #     6: 'SO',
            #     20: 'FBcv',
            #     28: 'GO',
            #     29: 'GO',
            #     30: 'GO',
            #     31: 'FBcv',  # not actually FBcv - I think FBbt.
            #     32: 'FBdv',
            #     37: 'GO',  # these are relationships
            #     73: 'DOID'
            # }
            # if int(cv_id) not in cv_prefixes:
            #     continue
            # Keep the raw key; replace cvterm_id with an internal id.
            cvterm_key = cvterm_id
            cvterm_id = self._makeInternalIdentifier('cvterm', cvterm_key)
            self.label_hash[cvterm_id] = name
            self.idhash['cvterm'][cvterm_key] = cvterm_id
            # look up the dbxref_id for the cvterm
            # hopefully it's one-to-one
            dbxrefs = self.dbxrefs.get(dbxref_id)
            if dbxrefs is not None:
                if len(dbxrefs) > 1:
                    LOG.info(
                        ">1 dbxref for this cvterm (%s: %s): %s",
                        str(cvterm_id), name, dbxrefs.values())
                elif len(dbxrefs) == 1:
                    # replace the cvterm with
                    # the dbxref (external) identifier
                    did = dbxrefs.popitem()[1]  # get the value
                    self.idhash['cvterm'][cvterm_key] = did
                    # also add the label to the dbxref
                    self.label_hash[did] = name
    return
[ "def", "_process_cvterm", "(", "self", ")", ":", "line_counter", "=", "0", "raw", "=", "'/'", ".", "join", "(", "(", "self", ".", "rawdir", ",", "'cvterm'", ")", ")", "LOG", ".", "info", "(", "\"processing cvterms\"", ")", "with", "open", "(", "raw", ...
CVterms are the internal identifiers for any controlled vocab or ontology term. Many are xrefd to actual ontologies. The actual external id is stored in the dbxref table, which we place into the internal hashmap for lookup with the cvterm id. The name of the external term is stored in the "name" element of this table, and we add that to the label hashmap for lookup elsewhere :return:
[ "CVterms", "are", "the", "internal", "identifiers", "for", "any", "controlled", "vocab", "or", "ontology", "term", ".", "Many", "are", "xrefd", "to", "actual", "ontologies", ".", "The", "actual", "external", "id", "is", "stored", "in", "the", "dbxref", "tabl...
python
train
Josef-Friedrich/phrydy
phrydy/mediafile.py
https://github.com/Josef-Friedrich/phrydy/blob/aa13755155977b4776e49f79984f9968ac1d74dc/phrydy/mediafile.py#L117-L139
def mutagen_call(action, path, func, *args, **kwargs): """Call a Mutagen function with appropriate error handling. `action` is a string describing what the function is trying to do, and `path` is the relevant filename. The rest of the arguments describe the callable to invoke. We require at least Mutagen 1.33, where `IOError` is *never* used, neither for internal parsing errors *nor* for ordinary IO error conditions such as a bad filename. Mutagen-specific parsing errors and IO errors are reraised as `UnreadableFileError`. Other exceptions raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`. """ try: return func(*args, **kwargs) except mutagen.MutagenError as exc: log.debug(u'%s failed: %s', action, six.text_type(exc)) raise UnreadableFileError(path, six.text_type(exc)) except Exception as exc: # Isolate bugs in Mutagen. log.debug(u'%s', traceback.format_exc()) log.error(u'uncaught Mutagen exception in %s: %s', action, exc) raise MutagenError(path, exc)
[ "def", "mutagen_call", "(", "action", ",", "path", ",", "func", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "return", "func", "(", "*", "args", ",", "*", "*", "kwargs", ")", "except", "mutagen", ".", "MutagenError", "as", "exc"...
Call a Mutagen function with appropriate error handling. `action` is a string describing what the function is trying to do, and `path` is the relevant filename. The rest of the arguments describe the callable to invoke. We require at least Mutagen 1.33, where `IOError` is *never* used, neither for internal parsing errors *nor* for ordinary IO error conditions such as a bad filename. Mutagen-specific parsing errors and IO errors are reraised as `UnreadableFileError`. Other exceptions raised inside Mutagen---i.e., bugs---are reraised as `MutagenError`.
[ "Call", "a", "Mutagen", "function", "with", "appropriate", "error", "handling", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xcombobox.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xcombobox.py#L395-L418
def setCheckable( self, state ): """ Sets whether or not this combobox stores checkable items. :param state | <bool> """ self._checkable = state # need to be editable to be checkable edit = self.lineEdit() if state: self.setEditable(True) edit.setReadOnly(True) # create connections model = self.model() model.rowsInserted.connect(self.adjustCheckState) model.dataChanged.connect(self.updateCheckedText) elif edit: edit.setReadOnly(False) self.updateCheckState() self.updateCheckedText()
[ "def", "setCheckable", "(", "self", ",", "state", ")", ":", "self", ".", "_checkable", "=", "state", "# need to be editable to be checkable", "edit", "=", "self", ".", "lineEdit", "(", ")", "if", "state", ":", "self", ".", "setEditable", "(", "True", ")", ...
Sets whether or not this combobox stores checkable items. :param state | <bool>
[ "Sets", "whether", "or", "not", "this", "combobox", "stores", "checkable", "items", ".", ":", "param", "state", "|", "<bool", ">" ]
python
train
ethereum/py-evm
eth/chains/base.py
https://github.com/ethereum/py-evm/blob/58346848f076116381d3274bbcea96b9e2cfcbdf/eth/chains/base.py#L545-L551
def get_block_by_hash(self, block_hash: Hash32) -> BaseBlock: """ Returns the requested block as specified by block hash. """ validate_word(block_hash, title="Block Hash") block_header = self.get_block_header_by_hash(block_hash) return self.get_block_by_header(block_header)
[ "def", "get_block_by_hash", "(", "self", ",", "block_hash", ":", "Hash32", ")", "->", "BaseBlock", ":", "validate_word", "(", "block_hash", ",", "title", "=", "\"Block Hash\"", ")", "block_header", "=", "self", ".", "get_block_header_by_hash", "(", "block_hash", ...
Returns the requested block as specified by block hash.
[ "Returns", "the", "requested", "block", "as", "specified", "by", "block", "hash", "." ]
python
train
ASMfreaK/habitipy
habitipy/api.py
https://github.com/ASMfreaK/habitipy/blob/555b8b20faf6d553353092614a8a0d612f0adbde/habitipy/api.py#L439-L446
def validate(self, obj): """check if obj has this api param""" if self.path: for i in self.path: obj = obj[i] obj = obj[self.field] raise NotImplementedError('Validation is not implemented yet')
[ "def", "validate", "(", "self", ",", "obj", ")", ":", "if", "self", ".", "path", ":", "for", "i", "in", "self", ".", "path", ":", "obj", "=", "obj", "[", "i", "]", "obj", "=", "obj", "[", "self", ".", "field", "]", "raise", "NotImplementedError",...
check if obj has this api param
[ "check", "if", "obj", "has", "this", "api", "param" ]
python
train
opentok/Opentok-Python-SDK
opentok/opentok.py
https://github.com/opentok/Opentok-Python-SDK/blob/ffc6714e76be0d29e6b56aff8cbf7509b71a8b2c/opentok/opentok.py#L896-L934
def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None): """ Use this method to change the layout type of a live streaming broadcast :param String broadcast_id: The ID of the broadcast that will be updated :param String layout_type: The layout type for the broadcast. Valid values are: 'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation' :param String stylesheet optional: CSS used to style the custom layout. Specify this only if you set the type property to 'custom' """ payload = { 'type': layout_type, } if layout_type == 'custom': if stylesheet is not None: payload['stylesheet'] = stylesheet endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True) response = requests.put( endpoint, data=json.dumps(payload), headers=self.json_headers(), proxies=self.proxies, timeout=self.timeout ) if response.status_code == 200: pass elif response.status_code == 400: raise BroadcastError( 'Invalid request. This response may indicate that data in your request data is ' 'invalid JSON. It may also indicate that you passed in invalid layout options.') elif response.status_code == 403: raise AuthError('Authentication error.') else: raise RequestError('OpenTok server error.', response.status_code)
[ "def", "set_broadcast_layout", "(", "self", ",", "broadcast_id", ",", "layout_type", ",", "stylesheet", "=", "None", ")", ":", "payload", "=", "{", "'type'", ":", "layout_type", ",", "}", "if", "layout_type", "==", "'custom'", ":", "if", "stylesheet", "is", ...
Use this method to change the layout type of a live streaming broadcast :param String broadcast_id: The ID of the broadcast that will be updated :param String layout_type: The layout type for the broadcast. Valid values are: 'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation' :param String stylesheet optional: CSS used to style the custom layout. Specify this only if you set the type property to 'custom'
[ "Use", "this", "method", "to", "change", "the", "layout", "type", "of", "a", "live", "streaming", "broadcast" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_acm.py#L155-L170
def nacm_rule_list_rule_module_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") nacm = ET.SubElement(config, "nacm", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-acm") rule_list = ET.SubElement(nacm, "rule-list") name_key = ET.SubElement(rule_list, "name") name_key.text = kwargs.pop('name') rule = ET.SubElement(rule_list, "rule") name_key = ET.SubElement(rule, "name") name_key.text = kwargs.pop('name') module_name = ET.SubElement(rule, "module-name") module_name.text = kwargs.pop('module_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "nacm_rule_list_rule_module_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "nacm", "=", "ET", ".", "SubElement", "(", "config", ",", "\"nacm\"", ",", "xmlns", "=", "\"urn:ietf:para...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
gtaylor/python-colormath
colormath/color_diff_matrix.py
https://github.com/gtaylor/python-colormath/blob/1d168613718d2d7d31ec4230524e987ef66823c7/colormath/color_diff_matrix.py#L11-L17
def delta_e_cie1976(lab_color_vector, lab_color_matrix): """ Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`. """ return numpy.sqrt( numpy.sum(numpy.power(lab_color_vector - lab_color_matrix, 2), axis=1))
[ "def", "delta_e_cie1976", "(", "lab_color_vector", ",", "lab_color_matrix", ")", ":", "return", "numpy", ".", "sqrt", "(", "numpy", ".", "sum", "(", "numpy", ".", "power", "(", "lab_color_vector", "-", "lab_color_matrix", ",", "2", ")", ",", "axis", "=", "...
Calculates the Delta E (CIE1976) between `lab_color_vector` and all colors in `lab_color_matrix`.
[ "Calculates", "the", "Delta", "E", "(", "CIE1976", ")", "between", "lab_color_vector", "and", "all", "colors", "in", "lab_color_matrix", "." ]
python
train
dadadel/pyment
pyment/docstring.py
https://github.com/dadadel/pyment/blob/3d1bdf87d083ff56230bd0bf7c5252e20552b7b6/pyment/docstring.py#L190-L207
def get_return_list(self, data): """Get the list of returned values. The list contains tuples (name=None, desc, type=None) :param data: the data to proceed """ return_list = [] lst = self.get_list_key(data, 'return') for l in lst: name, desc, rtype = l if l[2] is None: rtype = l[0] name = None desc = desc.strip() return_list.append((name, desc, rtype)) return return_list
[ "def", "get_return_list", "(", "self", ",", "data", ")", ":", "return_list", "=", "[", "]", "lst", "=", "self", ".", "get_list_key", "(", "data", ",", "'return'", ")", "for", "l", "in", "lst", ":", "name", ",", "desc", ",", "rtype", "=", "l", "if",...
Get the list of returned values. The list contains tuples (name=None, desc, type=None) :param data: the data to proceed
[ "Get", "the", "list", "of", "returned", "values", ".", "The", "list", "contains", "tuples", "(", "name", "=", "None", "desc", "type", "=", "None", ")" ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L3619-L3643
def _rm_joliet_dir(self, joliet_path): # type: (bytes) -> int ''' An internal method to remove a directory from the Joliet portion of the ISO. Parameters: joliet_path - The Joliet directory to remove. Returns: The number of bytes to remove from the ISO for this Joliet directory. ''' if self.joliet_vd is None: raise pycdlibexception.PyCdlibInternalError('Tried to remove joliet dir from non-Joliet ISO') log_block_size = self.joliet_vd.logical_block_size() joliet_child = self._find_joliet_record(joliet_path) num_bytes_to_remove = joliet_child.get_data_length() num_bytes_to_remove += self._remove_child_from_dr(joliet_child, joliet_child.index_in_parent, log_block_size) if joliet_child.ptr is None: raise pycdlibexception.PyCdlibInternalError('Joliet directory has no path table record; this should not be') if self.joliet_vd.remove_from_ptr_size(path_table_record.PathTableRecord.record_length(joliet_child.ptr.len_di)): num_bytes_to_remove += 4 * log_block_size return num_bytes_to_remove
[ "def", "_rm_joliet_dir", "(", "self", ",", "joliet_path", ")", ":", "# type: (bytes) -> int", "if", "self", ".", "joliet_vd", "is", "None", ":", "raise", "pycdlibexception", ".", "PyCdlibInternalError", "(", "'Tried to remove joliet dir from non-Joliet ISO'", ")", "log_...
An internal method to remove a directory from the Joliet portion of the ISO. Parameters: joliet_path - The Joliet directory to remove. Returns: The number of bytes to remove from the ISO for this Joliet directory.
[ "An", "internal", "method", "to", "remove", "a", "directory", "from", "the", "Joliet", "portion", "of", "the", "ISO", "." ]
python
train
tjcsl/ion
intranet/apps/announcements/views.py
https://github.com/tjcsl/ion/blob/5d722b0725d572039bb0929fd5715a4070c82c72/intranet/apps/announcements/views.py#L147-L184
def approve_announcement_view(request, req_id): """The approve announcement page. Teachers will be linked to this page from an email. req_id: The ID of the AnnouncementRequest """ req = get_object_or_404(AnnouncementRequest, id=req_id) requested_teachers = req.teachers_requested.all() logger.debug(requested_teachers) if request.user not in requested_teachers: messages.error(request, "You do not have permission to approve this announcement.") return redirect("index") if request.method == "POST": form = AnnouncementRequestForm(request.POST, instance=req) if form.is_valid(): obj = form.save(commit=True) # SAFE HTML obj.content = safe_html(obj.content) obj.save() if "approve" in request.POST: obj.teachers_approved.add(request.user) obj.save() if not obj.admin_email_sent: if settings.SEND_ANNOUNCEMENT_APPROVAL: admin_request_announcement_email(request, form, obj) obj.admin_email_sent = True obj.save() return redirect("approve_announcement_success") else: obj.save() return redirect("approve_announcement_reject") form = AnnouncementRequestForm(instance=req) context = {"form": form, "req": req, "admin_approve": False} return render(request, "announcements/approve.html", context)
[ "def", "approve_announcement_view", "(", "request", ",", "req_id", ")", ":", "req", "=", "get_object_or_404", "(", "AnnouncementRequest", ",", "id", "=", "req_id", ")", "requested_teachers", "=", "req", ".", "teachers_requested", ".", "all", "(", ")", "logger", ...
The approve announcement page. Teachers will be linked to this page from an email. req_id: The ID of the AnnouncementRequest
[ "The", "approve", "announcement", "page", ".", "Teachers", "will", "be", "linked", "to", "this", "page", "from", "an", "email", "." ]
python
train
Opentrons/opentrons
api/src/opentrons/protocol_api/labware.py
https://github.com/Opentrons/opentrons/blob/a7c15cc2636ecb64ab56c7edc1d8a57163aaeadf/api/src/opentrons/protocol_api/labware.py#L808-L827
def load_module_from_definition( definition: dict, parent: Location) -> \ Union[ModuleGeometry, ThermocyclerGeometry]: """ Return a :py:class:`ModuleGeometry` object from a specified definition :param definition: A dict representing all required data for a module's geometry. :param parent: A :py:class:`.Location` representing the location where the front and left most point of the outside of the module is (often the front-left corner of a slot on the deck). """ mod_name = definition['loadName'] if mod_name == 'thermocycler' or mod_name == 'semithermocycler': mod: Union[ModuleGeometry, ThermocyclerGeometry] = \ ThermocyclerGeometry(definition, parent) else: mod = ModuleGeometry(definition, parent) # TODO: calibration return mod
[ "def", "load_module_from_definition", "(", "definition", ":", "dict", ",", "parent", ":", "Location", ")", "->", "Union", "[", "ModuleGeometry", ",", "ThermocyclerGeometry", "]", ":", "mod_name", "=", "definition", "[", "'loadName'", "]", "if", "mod_name", "==",...
Return a :py:class:`ModuleGeometry` object from a specified definition :param definition: A dict representing all required data for a module's geometry. :param parent: A :py:class:`.Location` representing the location where the front and left most point of the outside of the module is (often the front-left corner of a slot on the deck).
[ "Return", "a", ":", "py", ":", "class", ":", "ModuleGeometry", "object", "from", "a", "specified", "definition" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/visuals/gridmesh.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/visuals/gridmesh.py#L55-L99
def set_data(self, xs=None, ys=None, zs=None, colors=None): '''Update the mesh data. Parameters ---------- xs : ndarray | None A 2d array of x coordinates for the vertices of the mesh. ys : ndarray | None A 2d array of y coordinates for the vertices of the mesh. zs : ndarray | None A 2d array of z coordinates for the vertices of the mesh. colors : ndarray | None The color at each point of the mesh. Must have shape (width, height, 4) or (width, height, 3) for rgba or rgb color definitions respectively. ''' if xs is None: xs = self._xs self.__vertices = None if ys is None: ys = self._ys self.__vertices = None if zs is None: zs = self._zs self.__vertices = None if self.__vertices is None: vertices, indices = create_grid_mesh(xs, ys, zs) self._xs = xs self._ys = ys self._zs = zs if self.__vertices is None: vertices, indices = create_grid_mesh(self._xs, self._ys, self._zs) self.__meshdata.set_vertices(vertices) self.__meshdata.set_faces(indices) if colors is not None: self.__meshdata.set_vertex_colors(colors.reshape( colors.shape[0] * colors.shape[1], colors.shape[2])) MeshVisual.set_data(self, meshdata=self.__meshdata)
[ "def", "set_data", "(", "self", ",", "xs", "=", "None", ",", "ys", "=", "None", ",", "zs", "=", "None", ",", "colors", "=", "None", ")", ":", "if", "xs", "is", "None", ":", "xs", "=", "self", ".", "_xs", "self", ".", "__vertices", "=", "None", ...
Update the mesh data. Parameters ---------- xs : ndarray | None A 2d array of x coordinates for the vertices of the mesh. ys : ndarray | None A 2d array of y coordinates for the vertices of the mesh. zs : ndarray | None A 2d array of z coordinates for the vertices of the mesh. colors : ndarray | None The color at each point of the mesh. Must have shape (width, height, 4) or (width, height, 3) for rgba or rgb color definitions respectively.
[ "Update", "the", "mesh", "data", "." ]
python
train
Nic30/hwt
hwt/synthesizer/param.py
https://github.com/Nic30/hwt/blob/8cbb399e326da3b22c233b98188a9d08dec057e6/hwt/synthesizer/param.py#L105-L115
def evalParam(p): """ Get value of parameter """ while isinstance(p, Param): p = p.get() if isinstance(p, RtlSignalBase): return p.staticEval() # use rather param inheritance instead of param as param value return toHVal(p)
[ "def", "evalParam", "(", "p", ")", ":", "while", "isinstance", "(", "p", ",", "Param", ")", ":", "p", "=", "p", ".", "get", "(", ")", "if", "isinstance", "(", "p", ",", "RtlSignalBase", ")", ":", "return", "p", ".", "staticEval", "(", ")", "# use...
Get value of parameter
[ "Get", "value", "of", "parameter" ]
python
test
hozn/stravalib
stravalib/attributes.py
https://github.com/hozn/stravalib/blob/5500ebc39e0bf4706bb1ca4c27b25e56becaaa5f/stravalib/attributes.py#L256-L274
def marshal(self, v): """ Turn this value into API format. Do a reverse dictionary lookup on choices to find the original value. If there are no keys or too many keys for now we raise a NotImplementedError as marshal is not used anywhere currently. In the future we will want to fail gracefully. """ if v: orig = [i for i in self.choices if self.choices[i] == v] if len(orig) == 1: return orig[0] elif len(orig) == 0: # No such choice raise NotImplementedError("No such reverse choice {0} for field {1}.".format(v, self)) else: # Too many choices. We could return one possible choice (e.g. orig[0]). raise NotImplementedError("Too many reverse choices {0} for value {1} for field {2}".format(orig, v, self))
[ "def", "marshal", "(", "self", ",", "v", ")", ":", "if", "v", ":", "orig", "=", "[", "i", "for", "i", "in", "self", ".", "choices", "if", "self", ".", "choices", "[", "i", "]", "==", "v", "]", "if", "len", "(", "orig", ")", "==", "1", ":", ...
Turn this value into API format. Do a reverse dictionary lookup on choices to find the original value. If there are no keys or too many keys for now we raise a NotImplementedError as marshal is not used anywhere currently. In the future we will want to fail gracefully.
[ "Turn", "this", "value", "into", "API", "format", "." ]
python
train
tensorflow/probability
tensorflow_probability/python/internal/monte_carlo.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/internal/monte_carlo.py#L30-L99
def expectation_importance_sampler(f, log_p, sampling_dist_q, z=None, n=None, seed=None, name='expectation_importance_sampler'): r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\). With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns \\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\) \\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\) \\(= E_p[f(Z)]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. If `f >= 0`, it is up to 2x more efficient to exponentiate the result of `expectation_importance_sampler_logspace` applied to `Log[f]`. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `f` works "just like" `q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `sampling_dist_q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: The importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`. """ q = sampling_dist_q with tf.name_scope(name): z = _get_samples(q, z, n, seed) log_p_z = log_p(z) q_log_prob_z = q.log_prob(z) def _importance_sampler_positive_f(log_f_z): # Same as expectation_importance_sampler_logspace, but using Tensors # rather than samples and functions. Allows us to sample once. 
log_values = log_f_z + log_p_z - q_log_prob_z return _logspace_mean(log_values) # With \\(f_{plus}(z) = max(0, f(z)), f_{minus}(z) = max(0, -f(z))\\), # \\(E_p[f(Z)] = E_p[f_{plus}(Z)] - E_p[f_{minus}(Z)]\\) # \\( = E_p[f_{plus}(Z) + 1] - E_p[f_{minus}(Z) + 1]\\) # Without incurring bias, 1 is added to each to prevent zeros in logspace. # The logarithm is approximately linear around 1 + epsilon, so this is good # for small values of 'z' as well. f_z = f(z) log_f_plus_z = tf.math.log1p(tf.nn.relu(f_z)) log_f_minus_z = tf.math.log1p(tf.nn.relu(-1. * f_z)) log_f_plus_integral = _importance_sampler_positive_f(log_f_plus_z) log_f_minus_integral = _importance_sampler_positive_f(log_f_minus_z) return tf.math.exp(log_f_plus_integral) - tf.math.exp(log_f_minus_integral)
[ "def", "expectation_importance_sampler", "(", "f", ",", "log_p", ",", "sampling_dist_q", ",", "z", "=", "None", ",", "n", "=", "None", ",", "seed", "=", "None", ",", "name", "=", "'expectation_importance_sampler'", ")", ":", "q", "=", "sampling_dist_q", "wit...
r"""Monte Carlo estimate of \\(E_p[f(Z)] = E_q[f(Z) p(Z) / q(Z)]\\). With \\(p(z) := exp^{log_p(z)}\\), this `Op` returns \\(n^{-1} sum_{i=1}^n [ f(z_i) p(z_i) / q(z_i) ], z_i ~ q,\\) \\(\approx E_q[ f(Z) p(Z) / q(Z) ]\\) \\(= E_p[f(Z)]\\) This integral is done in log-space with max-subtraction to better handle the often extreme values that `f(z) p(z) / q(z)` can take on. If `f >= 0`, it is up to 2x more efficient to exponentiate the result of `expectation_importance_sampler_logspace` applied to `Log[f]`. User supplies either `Tensor` of samples `z`, or number of samples to draw `n` Args: f: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `f` works "just like" `q.log_prob`. log_p: Callable mapping samples from `sampling_dist_q` to `Tensors` with shape broadcastable to `q.batch_shape`. For example, `log_p` works "just like" `sampling_dist_q.log_prob`. sampling_dist_q: The sampling distribution. `tfp.distributions.Distribution`. `float64` `dtype` recommended. `log_p` and `q` should be supported on the same set. z: `Tensor` of samples from `q`, produced by `q.sample` for some `n`. n: Integer `Tensor`. Number of samples to generate if `z` is not provided. seed: Python integer to seed the random number generator. name: A name to give this `Op`. Returns: The importance sampling estimate. `Tensor` with `shape` equal to batch shape of `q`, and `dtype` = `q.dtype`.
[ "r", "Monte", "Carlo", "estimate", "of", "\\\\", "(", "E_p", "[", "f", "(", "Z", ")", "]", "=", "E_q", "[", "f", "(", "Z", ")", "p", "(", "Z", ")", "/", "q", "(", "Z", ")", "]", "\\\\", ")", "." ]
python
test
mitsei/dlkit
dlkit/json_/resource/searches.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/resource/searches.py#L217-L228
def get_bins(self): """Gets the bin list resulting from the search. return: (osid.resource.BinList) - the bin list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.* """ if self.retrieved: raise errors.IllegalState('List has already been retrieved.') self.retrieved = True return objects.BinList(self._results, runtime=self._runtime)
[ "def", "get_bins", "(", "self", ")", ":", "if", "self", ".", "retrieved", ":", "raise", "errors", ".", "IllegalState", "(", "'List has already been retrieved.'", ")", "self", ".", "retrieved", "=", "True", "return", "objects", ".", "BinList", "(", "self", "....
Gets the bin list resulting from the search. return: (osid.resource.BinList) - the bin list raise: IllegalState - list already retrieved *compliance: mandatory -- This method must be implemented.*
[ "Gets", "the", "bin", "list", "resulting", "from", "the", "search", "." ]
python
train
numenta/htmresearch
htmresearch/frameworks/location/path_integration_union_narrowing.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/frameworks/location/path_integration_union_narrowing.py#L279-L291
def getLocationRepresentation(self): """ Get the full population representation of the location layer. """ activeCells = np.array([], dtype="uint32") totalPrevCells = 0 for module in self.L6aModules: activeCells = np.append(activeCells, module.getActiveCells() + totalPrevCells) totalPrevCells += module.numberOfCells() return activeCells
[ "def", "getLocationRepresentation", "(", "self", ")", ":", "activeCells", "=", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "\"uint32\"", ")", "totalPrevCells", "=", "0", "for", "module", "in", "self", ".", "L6aModules", ":", "activeCells", "=", ...
Get the full population representation of the location layer.
[ "Get", "the", "full", "population", "representation", "of", "the", "location", "layer", "." ]
python
train
sckott/pygbif
pygbif/occurrences/download.py
https://github.com/sckott/pygbif/blob/bf54f2920bc46d97d7e2e1b0c8059e5878f3c5ab/pygbif/occurrences/download.py#L361-L391
def download_get(key, path=".", **kwargs): """ Get a download from GBIF. :param key: [str] A key generated from a request, like that from ``download`` :param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end. :param **kwargs**: Further named arguments passed on to ``requests.get`` Downloads the zip file to a directory you specify on your machine. The speed of this function is of course proportional to the size of the file to download, and affected by your internet connection speed. This function only downloads the file. To open and read it, see https://github.com/BelgianBiodiversityPlatform/python-dwca-reader Usage:: from pygbif import occurrences as occ occ.download_get("0000066-140928181241064") occ.download_get("0003983-140910143529206") """ meta = pygbif.occurrences.download_meta(key) if meta['status'] != 'SUCCEEDED': raise Exception('download "%s" not of status SUCCEEDED' % key) else: print('Download file size: %s bytes' % meta['size']) url = 'http://api.gbif.org/v1/occurrence/download/request/' + key path = "%s/%s.zip" % (path, key) gbif_GET_write(url, path, **kwargs) print("On disk at " + path) return {'path': path, 'size': meta['size'], 'key': key}
[ "def", "download_get", "(", "key", ",", "path", "=", "\".\"", ",", "*", "*", "kwargs", ")", ":", "meta", "=", "pygbif", ".", "occurrences", ".", "download_meta", "(", "key", ")", "if", "meta", "[", "'status'", "]", "!=", "'SUCCEEDED'", ":", "raise", ...
Get a download from GBIF. :param key: [str] A key generated from a request, like that from ``download`` :param path: [str] Path to write zip file to. Default: ``"."``, with a ``.zip`` appended to the end. :param **kwargs**: Further named arguments passed on to ``requests.get`` Downloads the zip file to a directory you specify on your machine. The speed of this function is of course proportional to the size of the file to download, and affected by your internet connection speed. This function only downloads the file. To open and read it, see https://github.com/BelgianBiodiversityPlatform/python-dwca-reader Usage:: from pygbif import occurrences as occ occ.download_get("0000066-140928181241064") occ.download_get("0003983-140910143529206")
[ "Get", "a", "download", "from", "GBIF", "." ]
python
train
lingpy/sinopy
src/sinopy/sinopy.py
https://github.com/lingpy/sinopy/blob/59a47fcdfae3e0000ac6d2b3d7919bf875ec2056/src/sinopy/sinopy.py#L150-L164
def chars2gloss(chars): """ Get the TLS basic gloss for a characters. """ out = [] chars = gbk2big5(chars) for char in chars: tmp = [] if char in _cd.TLS: for entry in _cd.TLS[char]: baxter = _cd.TLS[char][entry]['UNIHAN_GLOSS'] if baxter != '?': tmp += [baxter] out += [','.join(tmp)] return out
[ "def", "chars2gloss", "(", "chars", ")", ":", "out", "=", "[", "]", "chars", "=", "gbk2big5", "(", "chars", ")", "for", "char", "in", "chars", ":", "tmp", "=", "[", "]", "if", "char", "in", "_cd", ".", "TLS", ":", "for", "entry", "in", "_cd", "...
Get the TLS basic gloss for a characters.
[ "Get", "the", "TLS", "basic", "gloss", "for", "a", "characters", "." ]
python
train
cassinyio/SwarmSpawner
cassinyspawner/swarmspawner.py
https://github.com/cassinyio/SwarmSpawner/blob/3c39134ef7e02e2afc5d18da7d18d2c69421ed08/cassinyspawner/swarmspawner.py#L260-L370
def start(self): """Start the single-user server in a docker service. You can specify the params for the service through jupyterhub_config.py or using the user_options """ # https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202 # By default jupyterhub calls the spawner passing user_options if self.use_user_options: user_options = self.user_options else: user_options = {} self.log.warn("user_options: {}".format(user_options)) service = yield self.get_service() if service is None: if 'name' in user_options: self.server_name = user_options['name'] if hasattr(self, 'container_spec') and self.container_spec is not None: container_spec = dict(**self.container_spec) elif user_options == {}: raise("A container_spec is needed in to create a service") container_spec.update(user_options.get('container_spec', {})) # iterates over mounts to create # a new mounts list of docker.types.Mount container_spec['mounts'] = [] for mount in self.container_spec['mounts']: m = dict(**mount) if 'source' in m: m['source'] = m['source'].format( username=self.service_owner) if 'driver_config' in m: device = m['driver_config']['options']['device'].format( username=self.service_owner ) m['driver_config']['options']['device'] = device m['driver_config'] = docker.types.DriverConfig( **m['driver_config']) container_spec['mounts'].append(docker.types.Mount(**m)) # some Envs are required by the single-user-image container_spec['env'] = self.get_env() if hasattr(self, 'resource_spec'): resource_spec = self.resource_spec resource_spec.update(user_options.get('resource_spec', {})) if hasattr(self, 'networks'): networks = self.networks if user_options.get('networks') is not None: networks = user_options.get('networks') if hasattr(self, 'placement'): placement = self.placement if user_options.get('placement') is not None: placement = user_options.get('placement') image = container_spec['Image'] del container_spec['Image'] # create the service container_spec = 
docker.types.ContainerSpec( image, **container_spec) resources = docker.types.Resources(**resource_spec) task_spec = {'container_spec': container_spec, 'resources': resources, 'placement': placement } task_tmpl = docker.types.TaskTemplate(**task_spec) resp = yield self.docker('create_service', task_tmpl, name=self.service_name, networks=networks) self.service_id = resp['ID'] self.log.info( "Created Docker service '%s' (id: %s) from image %s", self.service_name, self.service_id[:7], image) else: self.log.info( "Found existing Docker service '%s' (id: %s)", self.service_name, self.service_id[:7]) # Handle re-using API token. # Get the API token from the environment variables # of the running service: envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env'] for line in envs: if line.startswith('JPY_API_TOKEN='): self.api_token = line.split('=', 1)[1] break ip = self.service_name port = self.service_port # we use service_name instead of ip # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery # service_port is actually equal to 8888 return (ip, port)
[ "def", "start", "(", "self", ")", ":", "# https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202", "# By default jupyterhub calls the spawner passing user_options", "if", "self", ".", "use_user_options", ":", "user_options", "=", "self", ".", "user_options", ...
Start the single-user server in a docker service. You can specify the params for the service through jupyterhub_config.py or using the user_options
[ "Start", "the", "single", "-", "user", "server", "in", "a", "docker", "service", ".", "You", "can", "specify", "the", "params", "for", "the", "service", "through", "jupyterhub_config", ".", "py", "or", "using", "the", "user_options" ]
python
test
roaet/eh
eh/mdv/markdownviewer.py
https://github.com/roaet/eh/blob/9370864a9f1d65bb0f822d0aea83f1169c98f3bd/eh/mdv/markdownviewer.py#L271-L298
def style_ansi(raw_code, lang=None): """ actual code hilite """ lexer = 0 if lang: try: lexer = get_lexer_by_name(lang) except ValueError: print col(R, 'Lexer for %s not found' % lang) lexer = None if not lexer: try: if guess_lexer: lexer = pyg_guess_lexer(raw_code) except: pass if not lexer: lexer = get_lexer_by_name(def_lexer) tokens = lex(raw_code, lexer) cod = [] for t, v in tokens: if not v: continue _col = code_hl_tokens.get(t) if _col: cod.append(col(v, _col)) else: cod.append(v) return ''.join(cod)
[ "def", "style_ansi", "(", "raw_code", ",", "lang", "=", "None", ")", ":", "lexer", "=", "0", "if", "lang", ":", "try", ":", "lexer", "=", "get_lexer_by_name", "(", "lang", ")", "except", "ValueError", ":", "print", "col", "(", "R", ",", "'Lexer for %s ...
actual code hilite
[ "actual", "code", "hilite" ]
python
train
GetmeUK/MongoFrames
mongoframes/frames.py
https://github.com/GetmeUK/MongoFrames/blob/7d2bd792235dfa77a9deecab5366f5f73480823d/mongoframes/frames.py#L374-L394
def delete_many(cls, documents): """Delete multiple documents""" # Ensure all documents have been converted to frames frames = cls._ensure_frames(documents) all_count = len(documents) assert len([f for f in frames if '_id' in f._document]) == all_count, \ "Can't delete documents without `_id`s" # Send delete signal signal('delete').send(cls, frames=frames) # Prepare the documents to be deleted ids = [f._id for f in frames] # Delete the documents cls.get_collection().delete_many({'_id': {'$in': ids}}) # Send deleted signal signal('deleted').send(cls, frames=frames)
[ "def", "delete_many", "(", "cls", ",", "documents", ")", ":", "# Ensure all documents have been converted to frames", "frames", "=", "cls", ".", "_ensure_frames", "(", "documents", ")", "all_count", "=", "len", "(", "documents", ")", "assert", "len", "(", "[", "...
Delete multiple documents
[ "Delete", "multiple", "documents" ]
python
train
honeynet/beeswarm
beeswarm/drones/client/baits/telnet.py
https://github.com/honeynet/beeswarm/blob/db51ea0bc29f631c3e3b5312b479ac9d5e31079a/beeswarm/drones/client/baits/telnet.py#L124-L129
def connect(self): """ Open a new telnet session on the remote server. """ self.client = BaitTelnetClient(self.options['server'], self.options['port']) self.client.set_option_negotiation_callback(self.process_options)
[ "def", "connect", "(", "self", ")", ":", "self", ".", "client", "=", "BaitTelnetClient", "(", "self", ".", "options", "[", "'server'", "]", ",", "self", ".", "options", "[", "'port'", "]", ")", "self", ".", "client", ".", "set_option_negotiation_callback",...
Open a new telnet session on the remote server.
[ "Open", "a", "new", "telnet", "session", "on", "the", "remote", "server", "." ]
python
train
maxweisspoker/simplebitcoinfuncs
simplebitcoinfuncs/signandverify.py
https://github.com/maxweisspoker/simplebitcoinfuncs/blob/ad332433dfcc067e86d2e77fa0c8f1a27daffb63/simplebitcoinfuncs/signandverify.py#L185-L266
def signmsg(msg,priv,iscompressed,k=0): ''' Sign a message -- the message itself, not a hash -- with a given private key. Input private key must be hex, NOT WIF. Use wiftohex() found in .bitcoin in order to get the hex private key and whether it is (or rather, its public key is) compressed. 'iscompressed' is True/False bool for whether or not to indicate compression on the public key that corresponds to the input private key hex. 'iscompressed' is not defaulted to True like it is in most other functions, because it really matters whether you use it. All software implementations treat uncompressed and compressed keys as entirely different, and a valid message signature will NOT validate if the public key compression is not correct. Whereas for transaction signatures, only the r-value is checked, message signature validation additionally checks/verifies public key compression. So you must manually set it! Also, note that message signatures are an entirely different format from DER-encoded transaction signatures. Sample message, which includes the quotation marks, and has a new line and 4 spaces after the new line: "You miss 100% of the shots you don't take. -- Wayne Gretzky" -- Michael Scott >>> msg = '"You miss 100% of the shots you don\\'t take. -- Wayne Gretzky"\\n -- Michael Scott' >>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466' >>> k = 4 # chosen by fair dice roll, guaranteed to be random >>> signmsg(msg,p,True,k) 'H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo=' Your software should then translate that data set into something akin to: -----BEGIN BITCOIN SIGNED MESSAGE----- "You miss 100% of the shots you don't take. 
-- Wayne Gretzky" -- Michael Scott -----BEGIN BITCOIN SIGNATURE----- Address: 1AuZ7wby1rUVzwFvFgySeTFS7JcHN2TeGs H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo= -----END BITCOIN SIGNATURE----- ''' omsg = msg # Stripping carraige returns is standard practice in every # implementation I found, including Bitcoin Core msg = msg.replace("\r\n","\n") msg1 = hexstrlify(bytearray("\x18Bitcoin Signed Message:\n",'utf-8')) msg2 = tovarint(len(msg)) msg3 = hexstrlify(bytearray(msg,'utf-8')) msg = hash256(msg1 + msg2 + msg3) sig = sign(msg,priv,k) # Bitcoin message signature format doesn't use DER leading '00's # Although, r/s must be 64-char, so they are zfilled to that rlen = 2*int(sig[6:8],16) r = sig[8:8+(rlen)].lstrip("0").zfill(64) slen = 2*int(sig[10+(rlen):12+(rlen)],16) s = sig[12+(rlen):(12+(rlen)+(slen))].lstrip("0").zfill(64) pubkey = privtopub(priv,iscompressed) for i in range(4): prefix = 27 + i if iscompressed: prefix = prefix + 4 o = base64.b64encode(unhexlify(dechex(prefix,1) + r + s)) if str(o)[:2] == "b'": # Fuck you, Python 3 o = str(o)[2:-1] if verifymsg(omsg,o) == pubkey: return o raise Exception("Unknown failure. This method should never reach the end.")
[ "def", "signmsg", "(", "msg", ",", "priv", ",", "iscompressed", ",", "k", "=", "0", ")", ":", "omsg", "=", "msg", "# Stripping carraige returns is standard practice in every", "# implementation I found, including Bitcoin Core", "msg", "=", "msg", ".", "replace", "(", ...
Sign a message -- the message itself, not a hash -- with a given private key. Input private key must be hex, NOT WIF. Use wiftohex() found in .bitcoin in order to get the hex private key and whether it is (or rather, its public key is) compressed. 'iscompressed' is True/False bool for whether or not to indicate compression on the public key that corresponds to the input private key hex. 'iscompressed' is not defaulted to True like it is in most other functions, because it really matters whether you use it. All software implementations treat uncompressed and compressed keys as entirely different, and a valid message signature will NOT validate if the public key compression is not correct. Whereas for transaction signatures, only the r-value is checked, message signature validation additionally checks/verifies public key compression. So you must manually set it! Also, note that message signatures are an entirely different format from DER-encoded transaction signatures. Sample message, which includes the quotation marks, and has a new line and 4 spaces after the new line: "You miss 100% of the shots you don't take. -- Wayne Gretzky" -- Michael Scott >>> msg = '"You miss 100% of the shots you don\\'t take. -- Wayne Gretzky"\\n -- Michael Scott' >>> p = 'c05694a7af0e01dceb63e5912a415c28d3fc823ca1fd3fa34d41afde03740466' >>> k = 4 # chosen by fair dice roll, guaranteed to be random >>> signmsg(msg,p,True,k) 'H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo=' Your software should then translate that data set into something akin to: -----BEGIN BITCOIN SIGNED MESSAGE----- "You miss 100% of the shots you don't take. -- Wayne Gretzky" -- Michael Scott -----BEGIN BITCOIN SIGNATURE----- Address: 1AuZ7wby1rUVzwFvFgySeTFS7JcHN2TeGs H+ST2/HBDYDzWB5JBJMLFATMbBOQDuB1hHT6lKvoxM0TBxoLMWsgrFmA3CGam/poUZPl/PukXCrYBzuwMW3Tyyo= -----END BITCOIN SIGNATURE-----
[ "Sign", "a", "message", "--", "the", "message", "itself", "not", "a", "hash", "--", "with", "a", "given", "private", "key", "." ]
python
train
phoebe-project/phoebe2
phoebe/frontend/bundle.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/frontend/bundle.py#L2183-L2408
def add_dataset(self, kind, component=None, **kwargs): """ Add a new dataset to the bundle. If not provided, 'dataset' (the name of the new dataset) will be created for you and can be accessed by the 'dataset' attribute of the returned ParameterSet. For light curves, the light curve will be generated for the entire system. For radial velocities, you need to provide a list of components for which values should be computed. Available kinds include: * :func:`phoebe.parameters.dataset.lc` * :func:`phoebe.parameters.dataset.rv` * :func:`phoebe.parameters.dataset.etv` * :func:`phoebe.parameters.dataset.orb` * :func:`phoebe.parameters.dataset.mesh` * :func:`phoebe.parameters.dataset.lp` :parameter kind: function to call that returns a ParameterSet or list of parameters. This must either be a callable function that accepts nothing but default values, or the name of a function (as a string) that can be found in the :mod:`phoebe.parameters.dataset` module :type kind: str or callable :parameter component: a list of components for which to compute the observables. For light curves this should be left at None to always compute the light curve for the entire system. For most other types, you need to provide at least one component. 
:type component: str or list of strings or None :parameter str dataset: (optional) name of the newly-created dataset :parameter **kwargs: default values for any of the newly-created parameters :return: :class:`phoebe.parameters.parameters.ParameterSet` of all parameters that have been added :raises NotImplementedError: if required constraint is not implemented """ sing_plural = {} sing_plural['time'] = 'times' sing_plural['flux'] = 'fluxes' sing_plural['sigma'] = 'sigmas' sing_plural['rv'] = 'rvs' func = _get_add_func(_dataset, kind.lower() if isinstance(kind, str) else kind) kwargs.setdefault('dataset', self._default_label(func.func_name, **{'context': 'dataset', 'kind': func.func_name})) if kwargs.pop('check_label', True): self._check_label(kwargs['dataset']) kind = func.func_name # Let's remember if the user passed components or if they were automatically assigned user_provided_components = component or kwargs.get('components', False) if kind == 'lc': allowed_components = [None] default_components = allowed_components elif kind in ['rv', 'orb']: allowed_components = self.hierarchy.get_stars() # + self.hierarchy.get_orbits() default_components = self.hierarchy.get_stars() # TODO: how are we going to handle overcontacts dynamical vs flux-weighted elif kind in ['mesh']: # allowed_components = self.hierarchy.get_meshables() allowed_components = [None] # allowed_components = self.hierarchy.get_stars() # TODO: how will this work when changing hierarchy to add/remove the common envelope? default_components = allowed_components elif kind in ['etv']: hier = self.hierarchy stars = hier.get_stars() # only include components in which the sibling is also a star that # means that the companion in a triple cannot be timed, because how # do we know who it's eclipsing? allowed_components = [s for s in stars if hier.get_sibling_of(s) in stars] default_components = allowed_components elif kind in ['lp']: # TODO: need to think about what this should be for contacts... 
allowed_components = self.hierarchy.get_stars() + self.hierarchy.get_orbits() default_components = [self.hierarchy.get_top()] else: allowed_components = [None] default_components = [None] # Let's handle the case where the user accidentally sends components # instead of component if kwargs.get('components', None) and component is None: logger.warning("assuming you meant 'component' instead of 'components'") components = kwargs.pop('components') else: components = component if isinstance(components, str): components = [components] elif hasattr(components, '__iter__'): components = components elif components is None: components = default_components else: raise NotImplementedError # Let's handle the case where the user accidentally sends singular # instead of plural (since we used to have this) # TODO: use parameter._singular_to_plural? for singular, plural in sing_plural.items(): if kwargs.get(singular, None) is not None and kwargs.get(plural, None) is None: logger.warning("assuming you meant '{}' instead of '{}'".format(plural, singular)) kwargs[plural] = kwargs.pop(singular) if not np.all([component in allowed_components for component in components]): raise ValueError("'{}' not a recognized/allowable component".format(component)) obs_metawargs = {'context': 'dataset', 'kind': kind, 'dataset': kwargs['dataset']} if kind in ['lp']: # then times needs to be passed now to duplicate and tag the Parameters # correctly obs_kwargs = {'times': kwargs.pop('times', [])} else: obs_kwargs = {} obs_params, constraints = func(**obs_kwargs) self._attach_params(obs_params, **obs_metawargs) for constraint in constraints: # TODO: tricky thing here will be copying the constraints self.add_constraint(*constraint) dep_func = _get_add_func(_dataset, "{}_dep".format(kind)) dep_metawargs = {'context': 'dataset', 'kind': '{}_dep'.format(kind), 'dataset': kwargs['dataset']} dep_params = dep_func() self._attach_params(dep_params, **dep_metawargs) # Now we need to apply any kwargs sent by the 
user. There are a few # scenarios (and each kwargs could fall into different ones): # times = [0,1,2] # in this case, we want to apply time across all of the components that # are applicable for this dataset kind AND to _default so that any # future components added to the system are copied appropriately # times = [0,1,2], components=['primary', 'secondary'] # in this case, we want to apply the value for time across components # but time@_default should remain empty (it will not copy for components # added in the future) # times = {'primary': [0,1], 'secondary': [0,1,2]} # here, regardless of the components, we want to apply these to their # individually requested parameters. We won't touch _default unless # its included in the dictionary # this needs to happen before kwargs get applied so that the default # values can be overridden by the supplied kwargs self._handle_pblum_defaults() self._handle_dataset_selectparams() for k, v in kwargs.items(): if isinstance(v, dict): for component, value in v.items(): logger.debug("setting value of dataset parameter: qualifier={}, dataset={}, component={}, value={}".format(k, kwargs['dataset'], component, value)) try: self.set_value_all(qualifier=k, dataset=kwargs['dataset'], component=component, value=value, check_visible=False, ignore_none=True) except: self.remove_dataset(dataset=kwargs['dataset']) raise ValueError("could not set value for {}={}, dataset has not been added".format(k, value)) elif k in ['dataset']: pass else: # for dataset kinds that include passband dependent AND # independent parameters, we need to carefully default on # what component to use when passing the defaults if kind in ['rv', 'lp'] and k in ['ld_func', 'ld_coeffs', 'passband', 'intens_weighting', 'profile_rest', 'profile_func', 'profile_sv']: # passband-dependent (ie lc_dep) parameters do not have # assigned components components_ = None elif components == [None]: components_ = None elif user_provided_components: components_ = components else: 
components_ = components+['_default'] logger.debug("setting value of dataset parameter: qualifier={}, dataset={}, component={}, value={}".format(k, kwargs['dataset'], components_, v)) try: self.set_value_all(qualifier=k, dataset=kwargs['dataset'], component=components_, value=v, check_visible=False, ignore_none=True) except: self.remove_dataset(dataset=kwargs['dataset']) raise ValueError("could not set value for {}={}, dataset has not been added".format(k, v)) redo_kwargs = deepcopy({k:v if not isinstance(v, nparray.ndarray) else v.to_json() for k,v in kwargs.items()}) redo_kwargs['func'] = func.func_name self._add_history(redo_func='add_dataset', redo_kwargs=redo_kwargs, undo_func='remove_dataset', undo_kwargs={'dataset': kwargs['dataset']}) # since we've already processed (so that we can get the new qualifiers), # we'll only raise a warning self._kwargs_checks(kwargs, warning_only=True) return self.filter(dataset=kwargs['dataset'])
[ "def", "add_dataset", "(", "self", ",", "kind", ",", "component", "=", "None", ",", "*", "*", "kwargs", ")", ":", "sing_plural", "=", "{", "}", "sing_plural", "[", "'time'", "]", "=", "'times'", "sing_plural", "[", "'flux'", "]", "=", "'fluxes'", "sing...
Add a new dataset to the bundle. If not provided, 'dataset' (the name of the new dataset) will be created for you and can be accessed by the 'dataset' attribute of the returned ParameterSet. For light curves, the light curve will be generated for the entire system. For radial velocities, you need to provide a list of components for which values should be computed. Available kinds include: * :func:`phoebe.parameters.dataset.lc` * :func:`phoebe.parameters.dataset.rv` * :func:`phoebe.parameters.dataset.etv` * :func:`phoebe.parameters.dataset.orb` * :func:`phoebe.parameters.dataset.mesh` * :func:`phoebe.parameters.dataset.lp` :parameter kind: function to call that returns a ParameterSet or list of parameters. This must either be a callable function that accepts nothing but default values, or the name of a function (as a string) that can be found in the :mod:`phoebe.parameters.dataset` module :type kind: str or callable :parameter component: a list of components for which to compute the observables. For light curves this should be left at None to always compute the light curve for the entire system. For most other types, you need to provide at least one component. :type component: str or list of strings or None :parameter str dataset: (optional) name of the newly-created dataset :parameter **kwargs: default values for any of the newly-created parameters :return: :class:`phoebe.parameters.parameters.ParameterSet` of all parameters that have been added :raises NotImplementedError: if required constraint is not implemented
[ "Add", "a", "new", "dataset", "to", "the", "bundle", ".", "If", "not", "provided", "dataset", "(", "the", "name", "of", "the", "new", "dataset", ")", "will", "be", "created", "for", "you", "and", "can", "be", "accessed", "by", "the", "dataset", "attrib...
python
train
tilezen/tilequeue
tilequeue/rawr.py
https://github.com/tilezen/tilequeue/blob/d7b9484ab92e246eb2773949c784ebb37c731e28/tilequeue/rawr.py#L542-L561
def unpack_rawr_zip_payload(table_sources, payload): """unpack a zipfile and turn it into a callable "tables" object.""" # the io we get from S3 is streaming, so we can't seek on it, but zipfile # seems to require that. so we buffer it all in memory. RAWR tiles are # generally up to around 100MB in size, which should be safe to store in # RAM. from tilequeue.query.common import Table from io import BytesIO zfh = zipfile.ZipFile(BytesIO(payload), 'r') def get_table(table_name): # need to extract the whole compressed file from zip reader, as it # doesn't support .tell() on the filelike, which gzip requires. data = zfh.open(table_name, 'r').read() unpacker = Unpacker(file_like=BytesIO(data)) source = table_sources[table_name] return Table(source, unpacker) return get_table
[ "def", "unpack_rawr_zip_payload", "(", "table_sources", ",", "payload", ")", ":", "# the io we get from S3 is streaming, so we can't seek on it, but zipfile", "# seems to require that. so we buffer it all in memory. RAWR tiles are", "# generally up to around 100MB in size, which should be safe t...
unpack a zipfile and turn it into a callable "tables" object.
[ "unpack", "a", "zipfile", "and", "turn", "it", "into", "a", "callable", "tables", "object", "." ]
python
train
Yelp/threat_intel
threat_intel/util/api_cache.py
https://github.com/Yelp/threat_intel/blob/60eef841d7cca115ec7857aeb9c553b72b694851/threat_intel/util/api_cache.py#L71-L80
def lookup_value(self, api_name, key): """Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call. """ if api_name in self._cache: return self._cache[api_name].get(key, None) return None
[ "def", "lookup_value", "(", "self", ",", "api_name", ",", "key", ")", ":", "if", "api_name", "in", "self", ".", "_cache", ":", "return", "self", ".", "_cache", "[", "api_name", "]", ".", "get", "(", "key", ",", "None", ")", "return", "None" ]
Add the value of an API call to the cache. Args: api_name: a string name of the API. Keys and values are segmented by api_name. key: a string key for the specific call.
[ "Add", "the", "value", "of", "an", "API", "call", "to", "the", "cache", "." ]
python
train
JoeVirtual/KonFoo
konfoo/options.py
https://github.com/JoeVirtual/KonFoo/blob/0c62ef5c2bed4deaf908b34082e4de2544532fdc/konfoo/options.py#L72-L87
def verbose_option(default=False): """ Attaches the option ``verbose`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged. """ def decorator(method): @wraps(method) def wrapper(*args, **kwargs): option = Option.verbose.value kwargs[option] = kwargs.get(option, bool(default)) return method(*args, **kwargs) return wrapper return decorator
[ "def", "verbose_option", "(", "default", "=", "False", ")", ":", "def", "decorator", "(", "method", ")", ":", "@", "wraps", "(", "method", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "option", "=", "Option", ".", "ve...
Attaches the option ``verbose`` with its *default* value to the keyword arguments when the option does not exist. All positional arguments and keyword arguments are forwarded unchanged.
[ "Attaches", "the", "option", "verbose", "with", "its", "*", "default", "*", "value", "to", "the", "keyword", "arguments", "when", "the", "option", "does", "not", "exist", ".", "All", "positional", "arguments", "and", "keyword", "arguments", "are", "forwarded",...
python
train
WebarchivCZ/WA-KAT
src/wa_kat/templates/static/js/Lib/site-packages/components/progressbar.py
https://github.com/WebarchivCZ/WA-KAT/blob/16d064a3a775dc1d2713debda7847ded52dd2a06/src/wa_kat/templates/static/js/Lib/site-packages/components/progressbar.py#L69-L78
def reset(self): """ Reset the progressbar to 0, hide it and set original text message at background. """ self.hide() self.tag.class_name = "progress-bar progress-bar-striped active" self.tag.aria_valuemin = 0 self.tag.style.width = "{}%".format(0) self.tag.text = self.original_message
[ "def", "reset", "(", "self", ")", ":", "self", ".", "hide", "(", ")", "self", ".", "tag", ".", "class_name", "=", "\"progress-bar progress-bar-striped active\"", "self", ".", "tag", ".", "aria_valuemin", "=", "0", "self", ".", "tag", ".", "style", ".", "...
Reset the progressbar to 0, hide it and set original text message at background.
[ "Reset", "the", "progressbar", "to", "0", "hide", "it", "and", "set", "original", "text", "message", "at", "background", "." ]
python
train
radjkarl/fancyWidgets
DUMP/pyqtgraphBased/parametertree/parameterTypes.py
https://github.com/radjkarl/fancyWidgets/blob/ffe0d5747c5296c78575f0e0909af915a4a5698f/DUMP/pyqtgraphBased/parametertree/parameterTypes.py#L258-L266
def limitsChanged(self, param, limits): """Called when the parameter's limits have changed""" ParameterItem.limitsChanged(self, param, limits) t = self.param.opts['type'] if t == 'int' or t == 'float': self.widget.setOpts(bounds=limits) else: return
[ "def", "limitsChanged", "(", "self", ",", "param", ",", "limits", ")", ":", "ParameterItem", ".", "limitsChanged", "(", "self", ",", "param", ",", "limits", ")", "t", "=", "self", ".", "param", ".", "opts", "[", "'type'", "]", "if", "t", "==", "'int'...
Called when the parameter's limits have changed
[ "Called", "when", "the", "parameter", "s", "limits", "have", "changed" ]
python
train
EliotBerriot/django-dynamic-preferences
dynamic_preferences/managers.py
https://github.com/EliotBerriot/django-dynamic-preferences/blob/12eab4f17b960290525b215d954d1b5fb91199df/dynamic_preferences/managers.py#L122-L139
def get(self, key, no_cache=False): """Return the value of a single preference using a dotted path key :arg no_cache: if true, the cache is bypassed """ section, name = self.parse_lookup(key) preference = self.registry.get( section=section, name=name, fallback=False) if no_cache or not preferences_settings.ENABLE_CACHE: return self.get_db_pref(section=section, name=name).value try: return self.from_cache(section, name) except CachedValueNotFound: pass db_pref = self.get_db_pref(section=section, name=name) self.to_cache(db_pref) return db_pref.value
[ "def", "get", "(", "self", ",", "key", ",", "no_cache", "=", "False", ")", ":", "section", ",", "name", "=", "self", ".", "parse_lookup", "(", "key", ")", "preference", "=", "self", ".", "registry", ".", "get", "(", "section", "=", "section", ",", ...
Return the value of a single preference using a dotted path key :arg no_cache: if true, the cache is bypassed
[ "Return", "the", "value", "of", "a", "single", "preference", "using", "a", "dotted", "path", "key", ":", "arg", "no_cache", ":", "if", "true", "the", "cache", "is", "bypassed" ]
python
train
PSPC-SPAC-buyandsell/von_anchor
von_anchor/cache.py
https://github.com/PSPC-SPAC-buyandsell/von_anchor/blob/78ac1de67be42a676274f4bf71fe12f66e72f309/von_anchor/cache.py#L596-L625
async def get_state_json( self, rr_state_builder: Callable[['Verifier', str, int], Awaitable[Tuple[str, int]]], fro: int, to: int) -> (str, int): """ Get rev reg state json, and its timestamp on the distributed ledger, from cached rev reg state frames list or distributed ledger, updating cache as necessary. Raise BadRevStateTime if caller asks for a state in the future. On return of any previously existing rev reg state frame, always update its query time beforehand. :param rr_state_builder: callback to build rev reg state if need be (specify anchor instance's _build_rr_state()) :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation state timestamp :return: rev reg state json and ledger timestamp (epoch seconds) """ LOGGER.debug( 'RevoCacheEntry.get_state_json >>> rr_state_builder: %s, fro: %s, to: %s', rr_state_builder.__name__, fro, to) rv = await self._get_update(rr_state_builder, fro, to, False) LOGGER.debug('RevoCacheEntry.get_state_json <<< %s', rv) return rv
[ "async", "def", "get_state_json", "(", "self", ",", "rr_state_builder", ":", "Callable", "[", "[", "'Verifier'", ",", "str", ",", "int", "]", ",", "Awaitable", "[", "Tuple", "[", "str", ",", "int", "]", "]", "]", ",", "fro", ":", "int", ",", "to", ...
Get rev reg state json, and its timestamp on the distributed ledger, from cached rev reg state frames list or distributed ledger, updating cache as necessary. Raise BadRevStateTime if caller asks for a state in the future. On return of any previously existing rev reg state frame, always update its query time beforehand. :param rr_state_builder: callback to build rev reg state if need be (specify anchor instance's _build_rr_state()) :param fro: least time (epoch seconds) of interest; lower-bounds 'to' on frame housing return data :param to: greatest time (epoch seconds) of interest; upper-bounds returned revocation state timestamp :return: rev reg state json and ledger timestamp (epoch seconds)
[ "Get", "rev", "reg", "state", "json", "and", "its", "timestamp", "on", "the", "distributed", "ledger", "from", "cached", "rev", "reg", "state", "frames", "list", "or", "distributed", "ledger", "updating", "cache", "as", "necessary", "." ]
python
train
radjkarl/imgProcessor
imgProcessor/camera/lens/estimateSystematicErrorLensCorrection.py
https://github.com/radjkarl/imgProcessor/blob/7c5a28718f81c01a430152c60a686ac50afbfd7c/imgProcessor/camera/lens/estimateSystematicErrorLensCorrection.py#L24-L384
def simulateSytematicError(N_SAMPLES=5, N_IMAGES=10, SHOW_DETECTED_PATTERN=True, # GRAYSCALE=False, HEIGHT=500, PLOT_RESULTS=True, PLOT_ERROR_ARRAY=True, CAMERA_PARAM=None, PERSPECTIVE=True, ROTATION=True, RELATIVE_PATTERN_SIZE=0.5, POSITION=True, NOISE=25, BLUR=(3, 3), PATTERNS=None): ''' Simulates a lens calibration using synthetic images * images are rendered under the given HEIGHT resolution * noise and smoothing is applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate the error to the given ones simulation ----------- N_IMAGES -> number of images to take for a camera calibration N_SAMPLES -> number of camera calibrations of each pattern type output -------- SHOW_DETECTED_PATTERN: print each image and detected pattern to screen PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters PLOT_ERROR_ARRAY: plot position error for the lens correction pattern -------- this simulation tests the openCV standard patterns: chess board, asymmetric and symmetric circles GRAYSCALE: whether to load the pattern as gray scale RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%) PERSPECTIVE: [True] -> enable perspective distortion ROTATION: [True] -> enable rotation of the pattern BLUR: False or (sizex,sizey), like (3,3) CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2] ''' print( 'calculate systematic error of the implemented calibration algorithms') # LOCATION OF PATTERN IMAGES folder = MEDIA_PATH if PATTERNS is None: PATTERNS = ('Chessboard', 'Asymmetric circles', 'Symmetric circles') patterns = OrderedDict(( # n of inner corners ('Chessboard', ((6, 9), 'chessboard_pattern_a3.svg')), ('Asymmetric circles', ((4, 11), 'acircles_pattern_a3.svg')), ('Symmetric circles', ((8, 11), 'circles_pattern_a3.svg')), )) # REMOVE PATTERNS THAT ARE NOT TO BE TESTED: [patterns.pop(key) for key in patterns if 
key not in PATTERNS] if SHOW_DETECTED_PATTERN: cv2.namedWindow('Pattern', cv2.WINDOW_NORMAL) # number of positive detected patterns: success = [] # list[N_SAMPLES] of random camera parameters fx, fy, cx, cy, k1, k2, k3, p1, p2 = [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of given-detected parameters: errl, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l = [ ], [], [], [], [], [], [], [], [], [] # list[Method, N_SAMPLES] of magnitude(difference of displacement vector # array): dxl = [] dyl = [] # maintain aspect ratio of din a4, a3...: aspect_ratio_DIN = 2.0**0.5 width = int(round(HEIGHT / aspect_ratio_DIN)) if CAMERA_PARAM is None: CAMERA_PARAM = [ HEIGHT, HEIGHT, HEIGHT / 2, width / 2, 0.0, 0.01, 0.1, 0.01, 0.001] # ???CREATE N DIFFERENT RANDOM LENS ERRORS: for n in range(N_SAMPLES): # TODO: RANDOMIZE CAMERA ERROR?? fx.append(CAMERA_PARAM[0]) # * np.random.uniform(1, 2) ) fy.append(CAMERA_PARAM[1]) # * np.random.uniform(1, 2) ) cx.append(CAMERA_PARAM[2]) # * np.random.uniform(0.9, 1.1) ) cy.append(CAMERA_PARAM[3]) # * np.random.uniform(0.9, 1.1) ) k1.append(CAMERA_PARAM[4]) # + np.random.uniform(-1, 1)*0.1) k2.append(CAMERA_PARAM[5]) # + np.random.uniform(-1, 1)*0.01) p1.append(CAMERA_PARAM[6]) # + np.random.uniform(0, 1)*0.1) p2.append(CAMERA_PARAM[7]) # + np.random.uniform(0, 1)*0.01) k3.append(CAMERA_PARAM[8]) # + np.random.uniform(0, 1)*0.001) L = LensDistortion() # FOR EVERY METHOD: for method, (board_size, filename) in patterns.items(): f = folder.join(filename) # LOAD THE SVG FILE, AND SAVE IT WITH NEW RESOLUTION: svg = QtSvg.QSvgRenderer(f) image = QtGui.QImage(width * 4, HEIGHT * 4, QtGui.QImage.Format_ARGB32) image.fill(QtCore.Qt.white) # Get QPainter that paints to the image painter = QtGui.QPainter(image) svg.render(painter) # Save, image format based on file extension # f = "rendered.png" # image.save(f) # # if GRAYSCALE: # img = cv2.imread(f, cv2.IMREAD_GRAYSCALE) # else: # img = cv2.imread(f) img = qImageToArray(image) 
success.append([]) fxl.append([]) errl.append([]) fyl.append([]) cxl.append([]) cyl.append([]) k1l.append([]) k2l.append([]) k3l.append([]) p1l.append([]) p2l.append([]) dxl.append([]) dyl.append([]) imgHeight, imgWidth = img.shape[0], img.shape[1] for n in range(N_SAMPLES): L.calibrate(board_size, method) print('SET PARAMS:', fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L.setCameraParams( fx[n], fy[n], cx[n], cy[n], k1[n], k2[n], k3[n], p1[n], p2[n]) L._coeffs['shape'] = (imgHeight, imgWidth) hw = imgWidth * 0.5 hh = imgHeight * 0.5 for m in range(N_IMAGES): pts1 = np.float32([[hw, hh + 100], [hw - 100, hh - 100], [hw + 100, hh - 100]]) pts2 = pts1.copy() if ROTATION: rotatePolygon(pts2, np.random.randint(0, 2 * np.pi)) if PERSPECTIVE: # CREATE A RANDOM PERSPECTIVE: pts2 += np.random.randint(-hw * 0.05, hh * 0.05, size=(3, 2)) # MAKE SURE THAT THE PATTERN IS FULLY WITHIN THE IMAGE: pts2 *= RELATIVE_PATTERN_SIZE # MOVE TO THE CENTER pts2[:, 0] += hw * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * (1 - RELATIVE_PATTERN_SIZE) if POSITION: f = ((2 * np.random.rand(2)) - 1) pts2[:, 0] += hw * 0.7 * f[0] * (1 - RELATIVE_PATTERN_SIZE) pts2[:, 1] += hh * 0.7 * f[1] * (1 - RELATIVE_PATTERN_SIZE) # EXEC PERSPECTICE, POSITION, ROTATION: M = cv2.getAffineTransform(pts1, pts2) img_warped = cv2.warpAffine( img, M, (imgWidth, imgHeight), borderValue=(230, 230, 230)) # DOWNSCALE IMAGE AGAIN - UPSCALING AND DOWNSCALING SHOULD BRING THE ERRROR # WARPING DOWN img_warped = cv2.resize(img_warped, (width, HEIGHT)) # CREATE THE LENS DISTORTION: mapx, mapy = L.getDistortRectifyMap(width, HEIGHT) # print 664, mapx.shape img_distorted = cv2.remap( img_warped, mapx, mapy, cv2.INTER_LINEAR, borderValue=(230, 230, 230)) # img_distorted[img_distorted==0]=20 # img_distorted[img_distorted>100]=230 if BLUR: img_distorted = cv2.blur(img_distorted, BLUR) if NOISE: # soften, black and white more gray, and add noise img_distorted = img_distorted.astype(np.int16) img_distorted += 
(np.random.rand(*img_distorted.shape) * NOISE).astype(img_distorted.dtype) img_distorted = np.clip( img_distorted, 0, 255).astype(np.uint8) # plt.imshow(img_distorted) # plt.show() found = L.addImg(img_distorted) if SHOW_DETECTED_PATTERN and found: img_distorted = L.drawChessboard(img_distorted) cv2.imshow('Pattern', img_distorted) cv2.waitKey(1) success[-1].append(L.findCount) try: L._coeffs = None errl[-1].append(L.coeffs['reprojectionError']) L.correct(img_distorted) c = L.getCameraParams() print('GET PARAMS:', c) fxl[-1].append(fx[n] - c[0]) fyl[-1].append(fy[n] - c[1]) cxl[-1].append(cx[n] - c[2]) cyl[-1].append(cy[n] - c[3]) k1l[-1].append(k1[n] - c[4]) k2l[-1].append(k2[n] - c[5]) k3l[-1].append(k3[n] - c[6]) p1l[-1].append(p1[n] - c[7]) p2l[-1].append(p2[n] - c[8]) if PLOT_ERROR_ARRAY: dx = (mapx - L.mapx) / 2 dy = (mapy - L.mapy) / 2 dxl[-1].append(dx) dyl[-1].append(dy) except NothingFound: print( "Couldn't create a calibration because no patterns were detected") del painter # AVERAGE SAMPLES AND GET STD dx_std, dx_mean = [], [] dy_std, dy_mean = [], [] mag = [] std = [] for patterndx, patterndy in zip(dxl, dyl): x = np.mean(patterndx, axis=0) dx_mean.append(x) y = np.mean(patterndy, axis=0) dy_mean.append(y) x = np.std(patterndx, axis=0) mag.append((x**2 + y**2)**0.5) dx_std.append(x) y = np.std(patterndy, axis=0) dy_std.append(y) std.append((x**2 + y**2)**0.5) # PLOT p = len(patterns) if PLOT_RESULTS: fig, axs = plt.subplots(nrows=2, ncols=5) axs = np.array(axs).ravel() for ax, typ, tname in zip(axs, (success, fxl, fyl, cxl, cyl, k1l, k2l, k3l, p1l, p2l), ('Success rate', 'fx', 'fy', 'cx', 'cy', 'k1', 'k2', 'k3', 'p1', 'p2') ): ax.set_title(tname) # , showmeans=True, meanline=True)#labels=patterns.keys()) ax.boxplot(typ, notch=0, sym='+', vert=1, whis=1.5) # , ha=ha[n]) ax.set_xticklabels(patterns.keys(), rotation=40, fontsize=8) if PLOT_ERROR_ARRAY: mmin = np.min(mag) mmax = np.max(mag) smin = np.min(std) smax = np.max(std) plt.figure() for n, pattern 
in enumerate(patterns.keys()): plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper', vmin=mmin, vmax=mmax) if n == p - 1: plt.colorbar(label='Average') plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(std[n], origin='upper', vmin=smin, vmax=smax) if n == p - 1: plt.colorbar(label='Standard deviation') fig = plt.figure() fig.suptitle('Individually scaled') for n, pattern in enumerate(patterns.keys()): # downscale - show max 30 arrows each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() plt.quiver( X, Y, dy_mean[n][::ix, ::iy] * 20, dx_mean[n][::ix, ::iy] * 20) plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(std[n], origin='upper') plt.colorbar() # plt.quiver(X,Y,dx_std[n][::ix,::iy]*50, dy_std[n][::ix,::iy]*10) ############################################# fig = plt.figure() fig.suptitle('Spatial uncertainty + deflection') for n, pattern in enumerate(patterns.keys()): L.calibrate(board_size, method) # there is alot of additional calc thats not necassary: L.setCameraParams( fx[0], fy[0], cx[0], cy[0], k1[0], k2[0], k3[0], p1[0], p2[0]) L._coeffs['shape'] = (imgHeight, imgWidth) L._coeffs['reprojectionError'] = np.mean(errl[n]) # deflection_x, deflection_y = L.getDeflection(width, HEIGHT) # deflection_x += dx_mean[n] # deflection_y += dy_mean[n] ux, uy = L.standardUncertainties() plt.subplot(int('2%s%s' % (p, n + 1)), axisbg='g') plt.title(pattern) plt.imshow(mag[n], origin='upper') plt.colorbar() # DEFLECTION plt.subplot(int('2%s%s' % (p, n + p + 1)), axisbg='g') plt.title(pattern) plt.imshow(np.linalg.norm([ux, uy], axis=0), origin='upper') plt.colorbar() # DEFL: VECTORS # downscale - show max 30 arrows 
each dimension sy, sx = dx_mean[n].shape ix = int(sx / 15) if ix < 1: ix = 1 iy = int(sy / 15) if iy < 1: iy = 1 Y, X = np.meshgrid(np.arange(0, sy, iy), np.arange(0, sx, ix)) plt.quiver(X, Y, ux[::ix, ::iy] * 20, uy[::ix, ::iy] * 20) if PLOT_ERROR_ARRAY or PLOT_RESULTS: plt.show() return dx_mean, dy_mean
[ "def", "simulateSytematicError", "(", "N_SAMPLES", "=", "5", ",", "N_IMAGES", "=", "10", ",", "SHOW_DETECTED_PATTERN", "=", "True", ",", "# GRAYSCALE=False,\r", "HEIGHT", "=", "500", ",", "PLOT_RESULTS", "=", "True", ",", "PLOT_ERROR_ARRAY", "=", "True", ",", ...
Simulates a lens calibration using synthetic images * images are rendered under the given HEIGHT resolution * noise and smoothing is applied * perspective and position errors are applied * images are deformed using the given CAMERA_PARAM * the detected camera parameters are used to calculate the error to the given ones simulation ----------- N_IMAGES -> number of images to take for a camera calibration N_SAMPLES -> number of camera calibrations of each pattern type output -------- SHOW_DETECTED_PATTERN: print each image and detected pattern to screen PLOT_RESULTS: plot boxplots of the mean error and std of the camera parameters PLOT_ERROR_ARRAY: plot position error for the lens correction pattern -------- this simulation tests the openCV standard patterns: chess board, asymmetric and symmetric circles GRAYSCALE: whether to load the pattern as gray scale RELATIVE_PATTERN_SIZE: the relative size of the pattern within the image (0.4->40%) PERSPECTIVE: [True] -> enable perspective distortion ROTATION: [True] -> enable rotation of the pattern BLUR: False or (sizex,sizey), like (3,3) CAMERA_PARAM: camera calibration parameters as [fx,fy,cx,cy,k1,k2,k3,p1,p2]
[ "Simulates", "a", "lens", "calibration", "using", "synthetic", "images", "*", "images", "are", "rendered", "under", "the", "given", "HEIGHT", "resolution", "*", "noise", "and", "smoothing", "is", "applied", "*", "perspective", "and", "position", "errors", "are",...
python
train
rwl/pylon
examples/pyreto/thesis/common.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/examples/pyreto/thesis/common.py#L107-L120
def get_case24_ieee_rts(): """ Returns the 24 bus IEEE Reliability Test System. """ path = os.path.dirname(pylon.__file__) path = os.path.join(path, "test", "data") path = os.path.join(path, "case24_ieee_rts", "case24_ieee_rts.pkl") case = pylon.Case.load(path) # FIXME: Correct generator naming order. for g in case.generators: g.name return case
[ "def", "get_case24_ieee_rts", "(", ")", ":", "path", "=", "os", ".", "path", ".", "dirname", "(", "pylon", ".", "__file__", ")", "path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "\"test\"", ",", "\"data\"", ")", "path", "=", "os", "."...
Returns the 24 bus IEEE Reliability Test System.
[ "Returns", "the", "24", "bus", "IEEE", "Reliability", "Test", "System", "." ]
python
train
saltstack/salt
salt/modules/sensehat.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/sensehat.py#L140-L189
def show_message(message, msg_type=None, text_color=None, back_color=None, scroll_speed=0.1): ''' Displays a message on the LED matrix. message The message to display msg_type The type of the message. Changes the appearance of the message. Available types are:: error: red text warning: orange text success: green text info: blue text scroll_speed The speed at which the message moves over the LED matrix. This value represents the time paused for between shifting the text to the left by one column of pixels. Defaults to '0.1'. text_color The color in which the message is shown. Defaults to '[255, 255, 255]' (white). back_color The background color of the display. Defaults to '[0, 0, 0]' (black). CLI Example: .. code-block:: bash salt 'raspberry' sensehat.show_message 'Status ok' salt 'raspberry' sensehat.show_message 'Something went wrong' error salt 'raspberry' sensehat.show_message 'Red' text_color='[255, 0, 0]' salt 'raspberry' sensehat.show_message 'Hello world' None '[0, 0, 255]' '[255, 255, 0]' 0.2 ''' text_color = text_color or [255, 255, 255] back_color = back_color or [0, 0, 0] color_by_type = { 'error': [255, 0, 0], 'warning': [255, 100, 0], 'success': [0, 255, 0], 'info': [0, 0, 255] } if msg_type in color_by_type: text_color = color_by_type[msg_type] _sensehat.show_message(message, scroll_speed, text_color, back_color) return {'message': message}
[ "def", "show_message", "(", "message", ",", "msg_type", "=", "None", ",", "text_color", "=", "None", ",", "back_color", "=", "None", ",", "scroll_speed", "=", "0.1", ")", ":", "text_color", "=", "text_color", "or", "[", "255", ",", "255", ",", "255", "...
Displays a message on the LED matrix. message The message to display msg_type The type of the message. Changes the appearance of the message. Available types are:: error: red text warning: orange text success: green text info: blue text scroll_speed The speed at which the message moves over the LED matrix. This value represents the time paused for between shifting the text to the left by one column of pixels. Defaults to '0.1'. text_color The color in which the message is shown. Defaults to '[255, 255, 255]' (white). back_color The background color of the display. Defaults to '[0, 0, 0]' (black). CLI Example: .. code-block:: bash salt 'raspberry' sensehat.show_message 'Status ok' salt 'raspberry' sensehat.show_message 'Something went wrong' error salt 'raspberry' sensehat.show_message 'Red' text_color='[255, 0, 0]' salt 'raspberry' sensehat.show_message 'Hello world' None '[0, 0, 255]' '[255, 255, 0]' 0.2
[ "Displays", "a", "message", "on", "the", "LED", "matrix", "." ]
python
train
Azure/blobxfer
blobxfer/models/synccopy.py
https://github.com/Azure/blobxfer/blob/3eccbe7530cc6a20ab2d30f9e034b6f021817f34/blobxfer/models/synccopy.py#L329-L389
def _resume(self): # type: (Descriptor) -> int """Resume a download, if possible :param Descriptor self: this :rtype: int or None :return: verified download offset """ if self._resume_mgr is None or self._offset > 0: return None # check if path exists in resume db rr = self._resume_mgr.get_record(self._dst_ase) if rr is None: logger.debug('no resume record for {}'.format(self._dst_ase.path)) return None # ensure lengths are the same if rr.length != self._src_ase.size: logger.warning('resume length mismatch {} -> {}'.format( rr.length, self._src_ase.size)) return None # compute replica factor if blobxfer.util.is_not_empty(self._dst_ase.replica_targets): replica_factor = 1 + len(self._dst_ase.replica_targets) else: replica_factor = 1 # set offsets if completed if rr.completed: with self._meta_lock: logger.debug('{} upload already completed'.format( self._dst_ase.path)) self._offset = rr.offset self._src_block_list = rr.src_block_list self._chunk_num = rr.total_chunks self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks.int = rr.completed_chunks self._outstanding_ops = 0 return self._src_ase.size * replica_factor # re-hash from 0 to offset if needed _cc = bitstring.BitArray(length=rr.total_chunks) _cc.int = rr.completed_chunks curr_chunk = _cc.find('0b0')[0] del _cc # set values from resume with self._meta_lock: self._offset = rr.offset self._src_block_list = rr.src_block_list self._chunk_num = curr_chunk self._chunk_size = rr.chunk_size self._total_chunks = rr.total_chunks self._completed_chunks = bitstring.BitArray(length=rr.total_chunks) self._completed_chunks.set(True, range(0, curr_chunk + 1)) self._outstanding_ops = ( (rr.total_chunks - curr_chunk) * replica_factor ) logger.debug( ('resuming file {} from byte={} chunk={} chunk_size={} ' 'total_chunks={} outstanding_ops={}').format( self._src_ase.path, self._offset, self._chunk_num, self._chunk_size, self._total_chunks, self._outstanding_ops)) return rr.offset * 
replica_factor
[ "def", "_resume", "(", "self", ")", ":", "# type: (Descriptor) -> int", "if", "self", ".", "_resume_mgr", "is", "None", "or", "self", ".", "_offset", ">", "0", ":", "return", "None", "# check if path exists in resume db", "rr", "=", "self", ".", "_resume_mgr", ...
Resume a download, if possible :param Descriptor self: this :rtype: int or None :return: verified download offset
[ "Resume", "a", "download", "if", "possible", ":", "param", "Descriptor", "self", ":", "this", ":", "rtype", ":", "int", "or", "None", ":", "return", ":", "verified", "download", "offset" ]
python
train
dropbox/stone
stone/backends/python_types.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/backends/python_types.py#L327-L363
def _generate_struct_class(self, ns, data_type): # type: (ApiNamespace, Struct) -> None """Defines a Python class that represents a struct in Stone.""" self.emit(self._class_declaration_for_type(ns, data_type)) with self.indent(): if data_type.has_documented_type_or_fields(): self.emit('"""') if data_type.doc: self.emit_wrapped_text( self.process_doc(data_type.doc, self._docf)) if data_type.has_documented_fields(): self.emit() for field in data_type.fields: if not field.doc: continue self.emit_wrapped_text(':ivar {}: {}'.format( fmt_namespaced_var(ns.name, data_type.name, field.name), self.process_doc(field.doc, self._docf)), subsequent_prefix=' ') self.emit('"""') self.emit() self._generate_struct_class_slots(data_type) self._generate_struct_class_has_required_fields(data_type) self._generate_struct_class_init(data_type) self._generate_struct_class_properties(ns, data_type) self._generate_struct_class_custom_annotations(ns, data_type) self._generate_struct_class_repr(data_type) if data_type.has_enumerated_subtypes(): validator = 'StructTree' else: validator = 'Struct' self.emit('{0}_validator = bv.{1}({0})'.format( class_name_for_data_type(data_type), validator, )) self.emit()
[ "def", "_generate_struct_class", "(", "self", ",", "ns", ",", "data_type", ")", ":", "# type: (ApiNamespace, Struct) -> None", "self", ".", "emit", "(", "self", ".", "_class_declaration_for_type", "(", "ns", ",", "data_type", ")", ")", "with", "self", ".", "inde...
Defines a Python class that represents a struct in Stone.
[ "Defines", "a", "Python", "class", "that", "represents", "a", "struct", "in", "Stone", "." ]
python
train
ariebovenberg/valuable
valuable/xml.py
https://github.com/ariebovenberg/valuable/blob/72ac98b5a044233f13d14a9b9f273ce3a237d9ae/valuable/xml.py#L15-L20
def elemgetter(path: str) -> t.Callable[[Element], Element]: """shortcut making an XML element getter""" return compose( partial(_raise_if_none, exc=LookupError(path)), methodcaller('find', path) )
[ "def", "elemgetter", "(", "path", ":", "str", ")", "->", "t", ".", "Callable", "[", "[", "Element", "]", ",", "Element", "]", ":", "return", "compose", "(", "partial", "(", "_raise_if_none", ",", "exc", "=", "LookupError", "(", "path", ")", ")", ",",...
shortcut making an XML element getter
[ "shortcut", "making", "an", "XML", "element", "getter" ]
python
train
KelSolaar/Umbra
umbra/components/factory/script_editor/script_editor.py
https://github.com/KelSolaar/Umbra/blob/66f45f08d9d723787f1191989f8b0dda84b412ce/umbra/components/factory/script_editor/script_editor.py#L847-L858
def script_editor_file(self, value): """ Setter for **self.__script_editor_file** attribute. :param value: Attribute value. :type value: unicode """ if value is not None: assert type(value) is unicode, "'{0}' attribute: '{1}' type is not 'unicode'!".format( "script_editor_file", value) self.__script_editor_file = value
[ "def", "script_editor_file", "(", "self", ",", "value", ")", ":", "if", "value", "is", "not", "None", ":", "assert", "type", "(", "value", ")", "is", "unicode", ",", "\"'{0}' attribute: '{1}' type is not 'unicode'!\"", ".", "format", "(", "\"script_editor_file\"",...
Setter for **self.__script_editor_file** attribute. :param value: Attribute value. :type value: unicode
[ "Setter", "for", "**", "self", ".", "__script_editor_file", "**", "attribute", "." ]
python
train
iotile/coretools
iotilecore/iotile/core/utilities/schema_verify/string_verify.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilecore/iotile/core/utilities/schema_verify/string_verify.py#L33-L41
def format(self, indent_level, indent_size=4): """Format this verifier Returns: string: A formatted string """ desc = self.format_name('String') return self.wrap_lines(desc, indent_level, indent_size=indent_size)
[ "def", "format", "(", "self", ",", "indent_level", ",", "indent_size", "=", "4", ")", ":", "desc", "=", "self", ".", "format_name", "(", "'String'", ")", "return", "self", ".", "wrap_lines", "(", "desc", ",", "indent_level", ",", "indent_size", "=", "ind...
Format this verifier Returns: string: A formatted string
[ "Format", "this", "verifier" ]
python
train
nschloe/matplotlib2tikz
matplotlib2tikz/util.py
https://github.com/nschloe/matplotlib2tikz/blob/ac5daca6f38b834d757f6c6ae6cc34121956f46b/matplotlib2tikz/util.py#L11-L26
def get_legend_text(obj): """Check if line is in legend. """ leg = obj.axes.get_legend() if leg is None: return None keys = [l.get_label() for l in leg.legendHandles if l is not None] values = [l.get_text() for l in leg.texts] label = obj.get_label() d = dict(zip(keys, values)) if label in d: return d[label] return None
[ "def", "get_legend_text", "(", "obj", ")", ":", "leg", "=", "obj", ".", "axes", ".", "get_legend", "(", ")", "if", "leg", "is", "None", ":", "return", "None", "keys", "=", "[", "l", ".", "get_label", "(", ")", "for", "l", "in", "leg", ".", "legen...
Check if line is in legend.
[ "Check", "if", "line", "is", "in", "legend", "." ]
python
train
openstack/networking-cisco
networking_cisco/apps/saf/agent/vdp/lldpad.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/apps/saf/agent/vdp/lldpad.py#L644-L683
def send_vdp_assoc(self, vsiid=None, mgrid=None, typeid=None, typeid_ver=None, vsiid_frmt=vdp_const.VDP_VSIFRMT_UUID, filter_frmt=vdp_const.VDP_FILTER_GIDMACVID, gid=0, mac="", vlan=0, oui_id="", oui_data="", sw_resp=False): """Sends the VDP Associate Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param vsiid: VSI value, Only UUID supported for now :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :param sw_resp: Flag indicating if response is required from the daemon :return vlan: VLAN value returned by vdptool which in turn is given : by Switch """ if sw_resp and filter_frmt == vdp_const.VDP_FILTER_GIDMACVID: reply = self.send_vdp_query_msg("assoc", mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data) vlan_resp, fail_reason = self.get_vlan_from_query_reply( reply, vsiid, mac) if vlan_resp != constants.INVALID_VLAN: return vlan_resp, fail_reason reply = self.send_vdp_msg("assoc", mgrid, typeid, typeid_ver, vsiid_frmt, vsiid, filter_frmt, gid, mac, vlan, oui_id, oui_data, sw_resp) if sw_resp: vlan, fail_reason = self.get_vlan_from_associate_reply( reply, vsiid, mac) return vlan, fail_reason return None, None
[ "def", "send_vdp_assoc", "(", "self", ",", "vsiid", "=", "None", ",", "mgrid", "=", "None", ",", "typeid", "=", "None", ",", "typeid_ver", "=", "None", ",", "vsiid_frmt", "=", "vdp_const", ".", "VDP_VSIFRMT_UUID", ",", "filter_frmt", "=", "vdp_const", ".",...
Sends the VDP Associate Message. Please refer http://www.ieee802.org/1/pages/802.1bg.html VDP Section for more detailed information :param vsiid: VSI value, Only UUID supported for now :param mgrid: MGR ID :param typeid: Type ID :param typeid_ver: Version of the Type ID :param vsiid_frmt: Format of the following VSI argument :param filter_frmt: Filter Format. Only <GID,MAC,VID> supported for now :param gid: Group ID the vNIC belongs to :param mac: MAC Address of the vNIC :param vlan: VLAN of the vNIC :param oui_id: OUI Type :param oui_data: OUI Data :param sw_resp: Flag indicating if response is required from the daemon :return vlan: VLAN value returned by vdptool which in turn is given : by Switch
[ "Sends", "the", "VDP", "Associate", "Message", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L675-L696
def edit(self, index, trigger, event): """ Prompts the edit for the inputed index given a trigger and event. :param index | <QModelIndex> trigger | <EditTrigger> event | <QEvent> """ # disable right-click editing if trigger in (self.SelectedClicked, self.DoubleClicked) and \ event.button() in (QtCore.Qt.RightButton, QtCore.Qt.MidButton): return False if not self.isColumnEditingEnabled(index.column()): return False item = self.itemFromIndex(index) if isinstance(item, XTreeWidgetItem) and \ not item.isColumnEditingEnabled(index.column()): return False return super(XTreeWidget, self).edit(index, trigger, event)
[ "def", "edit", "(", "self", ",", "index", ",", "trigger", ",", "event", ")", ":", "# disable right-click editing\r", "if", "trigger", "in", "(", "self", ".", "SelectedClicked", ",", "self", ".", "DoubleClicked", ")", "and", "event", ".", "button", "(", ")"...
Prompts the edit for the inputed index given a trigger and event. :param index | <QModelIndex> trigger | <EditTrigger> event | <QEvent>
[ "Prompts", "the", "edit", "for", "the", "inputed", "index", "given", "a", "trigger", "and", "event", ".", ":", "param", "index", "|", "<QModelIndex", ">", "trigger", "|", "<EditTrigger", ">", "event", "|", "<QEvent", ">" ]
python
train
grabbles/grabbit
grabbit/core.py
https://github.com/grabbles/grabbit/blob/83ff93df36019eaaee9d4e31f816a518e46cae07/grabbit/core.py#L666-L697
def add_entity(self, domain, **kwargs): ''' Add a new Entity to tracking. ''' # Set the entity's mapping func if one was specified map_func = kwargs.get('map_func', None) if map_func is not None and not callable(kwargs['map_func']): if self.entity_mapper is None: raise ValueError("Mapping function '%s' specified for Entity " "'%s', but no entity mapper was passed when " "initializing the current Layout. Please make" " sure the 'entity_mapper' argument is set." % (map_func, kwargs['name'])) map_func = getattr(self.entity_mapper, kwargs['map_func']) kwargs['map_func'] = map_func ent = Entity(domain=domain, **kwargs) domain.add_entity(ent) if ent.mandatory: self.mandatory.add(ent.id) if ent.directory is not None: ent.directory = ent.directory.replace('{{root}}', self.root) self.entities[ent.id] = ent for alias in ent.aliases: self.entities[alias] = ent if self.dynamic_getters: func = partial(getattr(self, 'get'), target=ent.name, return_type='id') func_name = inflect.engine().plural(ent.name) setattr(self, 'get_%s' % func_name, func)
[ "def", "add_entity", "(", "self", ",", "domain", ",", "*", "*", "kwargs", ")", ":", "# Set the entity's mapping func if one was specified", "map_func", "=", "kwargs", ".", "get", "(", "'map_func'", ",", "None", ")", "if", "map_func", "is", "not", "None", "and"...
Add a new Entity to tracking.
[ "Add", "a", "new", "Entity", "to", "tracking", "." ]
python
train
artefactual-labs/agentarchives
agentarchives/archivists_toolkit/client.py
https://github.com/artefactual-labs/agentarchives/blob/af19ade56a90c64069cf46b50972fe72b6f10a45/agentarchives/archivists_toolkit/client.py#L334-L353
def find_resource_id_for_component(self, component_id): """ Given the ID of a component, returns the parent resource ID. If the immediate parent of the component is itself a component, this method will progress up the tree until a resource is found. :param long component_id: The ID of the ResourceComponent. :return: The ID of the component's parent resource. :rtype: long """ cursor = self.db.cursor() sql = "SELECT resourceId, parentResourceComponentId FROM ResourcesComponents WHERE resourceComponentId=%s" cursor.execute(sql, (component_id,)) resource_id, parent_id = cursor.fetchone() if resource_id is None: return self.find_resource_id_for_component(parent_id) else: return resource_id
[ "def", "find_resource_id_for_component", "(", "self", ",", "component_id", ")", ":", "cursor", "=", "self", ".", "db", ".", "cursor", "(", ")", "sql", "=", "\"SELECT resourceId, parentResourceComponentId FROM ResourcesComponents WHERE resourceComponentId=%s\"", "cursor", "....
Given the ID of a component, returns the parent resource ID. If the immediate parent of the component is itself a component, this method will progress up the tree until a resource is found. :param long component_id: The ID of the ResourceComponent. :return: The ID of the component's parent resource. :rtype: long
[ "Given", "the", "ID", "of", "a", "component", "returns", "the", "parent", "resource", "ID", "." ]
python
train
vecnet/vecnet.openmalaria
vecnet/openmalaria/scenario/monitoring.py
https://github.com/vecnet/vecnet.openmalaria/blob/795bc9d1b81a6c664f14879edda7a7c41188e95a/vecnet/openmalaria/scenario/monitoring.py#L155-L163
def _get_measures(self, et): """ Get a list of measures in <continuous> or <SurveyOptions> section """ list_of_measures = [] for tag in et.findall("option"): if tag.attrib.get("value", "true") == "true": list_of_measures.append(tag.attrib["name"]) return list_of_measures
[ "def", "_get_measures", "(", "self", ",", "et", ")", ":", "list_of_measures", "=", "[", "]", "for", "tag", "in", "et", ".", "findall", "(", "\"option\"", ")", ":", "if", "tag", ".", "attrib", ".", "get", "(", "\"value\"", ",", "\"true\"", ")", "==", ...
Get a list of measures in <continuous> or <SurveyOptions> section
[ "Get", "a", "list", "of", "measures", "in", "<continuous", ">", "or", "<SurveyOptions", ">", "section" ]
python
train
serge-sans-paille/pythran
pythran/spec.py
https://github.com/serge-sans-paille/pythran/blob/7e1b5af2dddfabc50bd2a977f0178be269b349b5/pythran/spec.py#L382-L392
def p_array_index(self, p): '''array_index : | NUM | COLUMN | COLUMN COLUMN''' if len(p) == 3: p[0] = slice(0, -1, -1) elif len(p) == 1 or p[1] == ':': p[0] = slice(0, -1, 1) else: p[0] = slice(0, int(p[1]), 1)
[ "def", "p_array_index", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "3", ":", "p", "[", "0", "]", "=", "slice", "(", "0", ",", "-", "1", ",", "-", "1", ")", "elif", "len", "(", "p", ")", "==", "1", "or", "p", "[",...
array_index : | NUM | COLUMN | COLUMN COLUMN
[ "array_index", ":", "|", "NUM", "|", "COLUMN", "|", "COLUMN", "COLUMN" ]
python
train
openego/ding0
ding0/grid/mv_grid/mv_connect.py
https://github.com/openego/ding0/blob/e2d6528f96255e4bb22ba15514a4f1883564ed5d/ding0/grid/mv_grid/mv_connect.py#L901-L1008
def mv_connect_generators(mv_grid_district, graph, debug=False): """Connect MV generators to MV grid Args ---- mv_grid_district: MVGridDistrictDing0 MVGridDistrictDing0 object for which the connection process has to be done graph: :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes debug: bool, defaults to False If True, information is printed during process. Returns ------- :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes and newly created branches """ generator_buffer_radius = cfg_ding0.get('mv_connect', 'generator_buffer_radius') generator_buffer_radius_inc = cfg_ding0.get('mv_connect', 'generator_buffer_radius_inc') # WGS84 (conformal) to ETRS (equidistant) projection proj1 = partial( pyproj.transform, pyproj.Proj(init='epsg:4326'), # source coordinate system pyproj.Proj(init='epsg:3035')) # destination coordinate system # ETRS (equidistant) to WGS84 (conformal) projection proj2 = partial( pyproj.transform, pyproj.Proj(init='epsg:3035'), # source coordinate system pyproj.Proj(init='epsg:4326')) # destination coordinate system for generator in sorted(mv_grid_district.mv_grid.generators(), key=lambda x: repr(x)): # ===== voltage level 4: generator has to be connected to MV station ===== if generator.v_level == 4: mv_station = mv_grid_district.mv_grid.station() branch_length = calc_geo_dist_vincenty(generator, mv_station) # TODO: set branch type to something reasonable (to be calculated) branch_kind = mv_grid_district.mv_grid.default_branch_kind branch_type = mv_grid_district.mv_grid.default_branch_type branch = BranchDing0(length=branch_length, kind=branch_kind, type=branch_type, ring=None) graph.add_edge(generator, mv_station, branch=branch) if debug: logger.debug('Generator {0} was connected to {1}'.format( generator, mv_station)) # ===== voltage level 5: generator has to be connected to MV grid (next-neighbor) ===== elif generator.v_level == 5: generator_shp = transform(proj1, generator.geo_data) # get branches within a the 
predefined radius `generator_buffer_radius` branches = calc_geo_branches_in_buffer(generator, mv_grid_district.mv_grid, generator_buffer_radius, generator_buffer_radius_inc, proj1) # calc distance between generator and grid's lines -> find nearest line conn_objects_min_stack = find_nearest_conn_objects(generator_shp, branches, proj1, conn_dist_weight=1, debug=debug, branches_only=False) # connect! # go through the stack (from nearest to most far connection target object) generator_connected = False for dist_min_obj in conn_objects_min_stack: # Note 1: conn_dist_ring_mod=0 to avoid re-routing of existent lines # Note 2: In connect_node(), the default cable/line type of grid is used. This is reasonable since # the max. allowed power of the smallest possible cable/line type (3.64 MVA for overhead # line of type 48-AL1/8-ST1A) exceeds the max. allowed power of a generator (4.5 MVA (dena)) # (if connected separately!) target_obj_result = connect_node(generator, generator_shp, mv_grid_district.mv_grid, dist_min_obj, proj2, graph, conn_dist_ring_mod=0, debug=debug) if target_obj_result is not None: if debug: logger.debug( 'Generator {0} was connected to {1}'.format( generator, target_obj_result)) generator_connected = True break if not generator_connected and debug: logger.debug( 'Generator {0} could not be connected, try to ' 'increase the parameter `generator_buffer_radius` in ' 'config file `config_calc.cfg` to gain more possible ' 'connection points.'.format(generator)) return graph
[ "def", "mv_connect_generators", "(", "mv_grid_district", ",", "graph", ",", "debug", "=", "False", ")", ":", "generator_buffer_radius", "=", "cfg_ding0", ".", "get", "(", "'mv_connect'", ",", "'generator_buffer_radius'", ")", "generator_buffer_radius_inc", "=", "cfg_d...
Connect MV generators to MV grid Args ---- mv_grid_district: MVGridDistrictDing0 MVGridDistrictDing0 object for which the connection process has to be done graph: :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes debug: bool, defaults to False If True, information is printed during process. Returns ------- :networkx:`NetworkX Graph Obj< >` NetworkX graph object with nodes and newly created branches
[ "Connect", "MV", "generators", "to", "MV", "grid" ]
python
train
awslabs/serverless-application-model
samtranslator/validator/validator.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/samtranslator/validator/validator.py#L12-L35
def validate(template_dict, schema=None): """ Is this a valid SAM template dictionary :param dict template_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing SAM template :return: Empty string if there are no validation errors in template """ if not schema: schema = SamTemplateValidator._read_schema() validation_errors = "" try: jsonschema.validate(template_dict, schema) except ValidationError as ex: # Stringifying the exception will give us useful error message validation_errors = str(ex) # Swallowing expected exception here as our caller is expecting validation errors and # not the valiation exception itself pass return validation_errors
[ "def", "validate", "(", "template_dict", ",", "schema", "=", "None", ")", ":", "if", "not", "schema", ":", "schema", "=", "SamTemplateValidator", ".", "_read_schema", "(", ")", "validation_errors", "=", "\"\"", "try", ":", "jsonschema", ".", "validate", "(",...
Is this a valid SAM template dictionary :param dict template_dict: Data to be validated :param dict schema: Optional, dictionary containing JSON Schema representing SAM template :return: Empty string if there are no validation errors in template
[ "Is", "this", "a", "valid", "SAM", "template", "dictionary" ]
python
train
python-wink/python-wink
src/pywink/api.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/api.py#L464-L483
def post_session(): """ This endpoint appears to be required in order to keep pubnub updates flowing for some user. This just posts a random nonce to the /users/me/session endpoint and returns the result. """ url_string = "{}/users/me/session".format(WinkApiInterface.BASE_URL) nonce = ''.join([str(random.randint(0, 9)) for i in range(9)]) _json = {"nonce": str(nonce)} try: arequest = requests.post(url_string, data=json.dumps(_json), headers=API_HEADERS) response_json = arequest.json() return response_json except requests.exceptions.RequestException: return None
[ "def", "post_session", "(", ")", ":", "url_string", "=", "\"{}/users/me/session\"", ".", "format", "(", "WinkApiInterface", ".", "BASE_URL", ")", "nonce", "=", "''", ".", "join", "(", "[", "str", "(", "random", ".", "randint", "(", "0", ",", "9", ")", ...
This endpoint appears to be required in order to keep pubnub updates flowing for some user. This just posts a random nonce to the /users/me/session endpoint and returns the result.
[ "This", "endpoint", "appears", "to", "be", "required", "in", "order", "to", "keep", "pubnub", "updates", "flowing", "for", "some", "user", "." ]
python
train
gem/oq-engine
openquake/calculators/extract.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/extract.py#L139-L150
def extract_(dstore, dspath): """ Extracts an HDF5 path object from the datastore, for instance extract(dstore, 'sitecol'). """ obj = dstore[dspath] if isinstance(obj, Dataset): return ArrayWrapper(obj.value, obj.attrs) elif isinstance(obj, Group): return ArrayWrapper(numpy.array(list(obj)), obj.attrs) else: return obj
[ "def", "extract_", "(", "dstore", ",", "dspath", ")", ":", "obj", "=", "dstore", "[", "dspath", "]", "if", "isinstance", "(", "obj", ",", "Dataset", ")", ":", "return", "ArrayWrapper", "(", "obj", ".", "value", ",", "obj", ".", "attrs", ")", "elif", ...
Extracts an HDF5 path object from the datastore, for instance extract(dstore, 'sitecol').
[ "Extracts", "an", "HDF5", "path", "object", "from", "the", "datastore", "for", "instance", "extract", "(", "dstore", "sitecol", ")", "." ]
python
train
librosa/librosa
librosa/util/utils.py
https://github.com/librosa/librosa/blob/180e8e6eb8f958fa6b20b8cba389f7945d508247/librosa/util/utils.py#L111-L172
def valid_audio(y, mono=True): '''Validate whether a variable contains valid, mono audio data. Parameters ---------- y : np.ndarray The input data to validate mono : bool Whether or not to force monophonic audio Returns ------- valid : bool True if all tests pass Raises ------ ParameterError If `y` fails to meet the following criteria: - `type(y)` is `np.ndarray` - `y.dtype` is floating-point - `mono == True` and `y.ndim` is not 1 - `mono == False` and `y.ndim` is not 1 or 2 - `np.isfinite(y).all()` is not True Notes ----- This function caches at level 20. Examples -------- >>> # Only allow monophonic signals >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> librosa.util.valid_audio(y) True >>> # If we want to allow stereo signals >>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False) >>> librosa.util.valid_audio(y, mono=False) True ''' if not isinstance(y, np.ndarray): raise ParameterError('data must be of type numpy.ndarray') if not np.issubdtype(y.dtype, np.floating): raise ParameterError('data must be floating-point') if mono and y.ndim != 1: raise ParameterError('Invalid shape for monophonic audio: ' 'ndim={:d}, shape={}'.format(y.ndim, y.shape)) elif y.ndim > 2 or y.ndim == 0: raise ParameterError('Audio must have shape (samples,) or (channels, samples). ' 'Received shape={}'.format(y.shape)) if not np.isfinite(y).all(): raise ParameterError('Audio buffer is not finite everywhere') return True
[ "def", "valid_audio", "(", "y", ",", "mono", "=", "True", ")", ":", "if", "not", "isinstance", "(", "y", ",", "np", ".", "ndarray", ")", ":", "raise", "ParameterError", "(", "'data must be of type numpy.ndarray'", ")", "if", "not", "np", ".", "issubdtype",...
Validate whether a variable contains valid, mono audio data. Parameters ---------- y : np.ndarray The input data to validate mono : bool Whether or not to force monophonic audio Returns ------- valid : bool True if all tests pass Raises ------ ParameterError If `y` fails to meet the following criteria: - `type(y)` is `np.ndarray` - `y.dtype` is floating-point - `mono == True` and `y.ndim` is not 1 - `mono == False` and `y.ndim` is not 1 or 2 - `np.isfinite(y).all()` is not True Notes ----- This function caches at level 20. Examples -------- >>> # Only allow monophonic signals >>> y, sr = librosa.load(librosa.util.example_audio_file()) >>> librosa.util.valid_audio(y) True >>> # If we want to allow stereo signals >>> y, sr = librosa.load(librosa.util.example_audio_file(), mono=False) >>> librosa.util.valid_audio(y, mono=False) True
[ "Validate", "whether", "a", "variable", "contains", "valid", "mono", "audio", "data", "." ]
python
test
kennell/schiene
schiene/schiene.py
https://github.com/kennell/schiene/blob/a8f1ba2bd30f9f4a373c7b0ced589bd60121aa1f/schiene/schiene.py#L40-L46
def parse_stations(html): """ Strips JS code, loads JSON """ html = html.replace('SLs.sls=', '').replace(';SLs.showSuggestion();', '') html = json.loads(html) return html['suggestions']
[ "def", "parse_stations", "(", "html", ")", ":", "html", "=", "html", ".", "replace", "(", "'SLs.sls='", ",", "''", ")", ".", "replace", "(", "';SLs.showSuggestion();'", ",", "''", ")", "html", "=", "json", ".", "loads", "(", "html", ")", "return", "htm...
Strips JS code, loads JSON
[ "Strips", "JS", "code", "loads", "JSON" ]
python
train
codebynumbers/ftpretty
ftpretty.py
https://github.com/codebynumbers/ftpretty/blob/5ee6e2cc679199ff52d1cd2ed1b0613f12aa6f67/ftpretty.py#L196-L203
def cd(self, remote): """ Change working directory on server """ try: self.conn.cwd(remote) except Exception: return False else: return self.pwd()
[ "def", "cd", "(", "self", ",", "remote", ")", ":", "try", ":", "self", ".", "conn", ".", "cwd", "(", "remote", ")", "except", "Exception", ":", "return", "False", "else", ":", "return", "self", ".", "pwd", "(", ")" ]
Change working directory on server
[ "Change", "working", "directory", "on", "server" ]
python
train
apple/turicreate
deps/src/libxml2-2.9.1/python/libxml2.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/libxml2-2.9.1/python/libxml2.py#L5129-L5135
def decodeEntities(self, len, what, end, end2, end3): """This function is deprecated, we now always process entities content through xmlStringDecodeEntities TODO: remove it in next major release. [67] Reference ::= EntityRef | CharRef [69] PEReference ::= '%' Name ';' """ ret = libxml2mod.xmlDecodeEntities(self._o, len, what, end, end2, end3) return ret
[ "def", "decodeEntities", "(", "self", ",", "len", ",", "what", ",", "end", ",", "end2", ",", "end3", ")", ":", "ret", "=", "libxml2mod", ".", "xmlDecodeEntities", "(", "self", ".", "_o", ",", "len", ",", "what", ",", "end", ",", "end2", ",", "end3"...
This function is deprecated, we now always process entities content through xmlStringDecodeEntities TODO: remove it in next major release. [67] Reference ::= EntityRef | CharRef [69] PEReference ::= '%' Name ';'
[ "This", "function", "is", "deprecated", "we", "now", "always", "process", "entities", "content", "through", "xmlStringDecodeEntities", "TODO", ":", "remove", "it", "in", "next", "major", "release", ".", "[", "67", "]", "Reference", "::", "=", "EntityRef", "|",...
python
train
napalm-automation/napalm
napalm/junos/junos.py
https://github.com/napalm-automation/napalm/blob/c11ae8bb5ce395698704a0051cdf8d144fbb150d/napalm/junos/junos.py#L1399-L1425
def get_arp_table(self, vrf=""): """Return the ARP table.""" # could use ArpTable # from jnpr.junos.op.phyport import ArpTable # and simply use it # but # we need: # - filters # - group by VLAN ID # - hostname & TTE fields as well if vrf: msg = "VRF support has not been added for this getter on this platform." raise NotImplementedError(msg) arp_table = [] arp_table_raw = junos_views.junos_arp_table(self.device) arp_table_raw.get() arp_table_items = arp_table_raw.items() for arp_table_entry in arp_table_items: arp_entry = {elem[0]: elem[1] for elem in arp_table_entry[1]} arp_entry["mac"] = napalm.base.helpers.mac(arp_entry.get("mac")) arp_entry["ip"] = napalm.base.helpers.ip(arp_entry.get("ip")) arp_table.append(arp_entry) return arp_table
[ "def", "get_arp_table", "(", "self", ",", "vrf", "=", "\"\"", ")", ":", "# could use ArpTable", "# from jnpr.junos.op.phyport import ArpTable", "# and simply use it", "# but", "# we need:", "# - filters", "# - group by VLAN ID", "# - hostname & TTE fields as well", "if", ...
Return the ARP table.
[ "Return", "the", "ARP", "table", "." ]
python
train
i3visio/osrframework
osrframework/thirdparties/pipl_com/lib/containers.py
https://github.com/i3visio/osrframework/blob/83437f4c14c9c08cb80a896bd9834c77f6567871/osrframework/thirdparties/pipl_com/lib/containers.py#L232-L245
def unsearchable_fields(self): """A list of all the fields that can't be searched by. For example: names/usernames that are too short, emails that are invalid etc. """ filter_func = lambda field: not field.is_searchable return filter(filter_func, self.names) + \ filter(filter_func, self.emails) + \ filter(filter_func, self.phones) + \ filter(filter_func, self.usernames) + \ filter(filter_func, self.addresses) + \ filter(filter_func, self.dobs)
[ "def", "unsearchable_fields", "(", "self", ")", ":", "filter_func", "=", "lambda", "field", ":", "not", "field", ".", "is_searchable", "return", "filter", "(", "filter_func", ",", "self", ".", "names", ")", "+", "filter", "(", "filter_func", ",", "self", "...
A list of all the fields that can't be searched by. For example: names/usernames that are too short, emails that are invalid etc.
[ "A", "list", "of", "all", "the", "fields", "that", "can", "t", "be", "searched", "by", ".", "For", "example", ":", "names", "/", "usernames", "that", "are", "too", "short", "emails", "that", "are", "invalid", "etc", "." ]
python
train
pydron/anycall
anycall/bytequeue.py
https://github.com/pydron/anycall/blob/43add96660258a14b24aa8e8413dffb1741b72d7/anycall/bytequeue.py#L19-L30
def enqueue(self, s): """ Append `s` to the queue. Equivalent to:: queue += s if `queue` where a regular string. """ self._parts.append(s) self._len += len(s)
[ "def", "enqueue", "(", "self", ",", "s", ")", ":", "self", ".", "_parts", ".", "append", "(", "s", ")", "self", ".", "_len", "+=", "len", "(", "s", ")" ]
Append `s` to the queue. Equivalent to:: queue += s if `queue` where a regular string.
[ "Append", "s", "to", "the", "queue", ".", "Equivalent", "to", "::", "queue", "+", "=", "s", "if", "queue", "where", "a", "regular", "string", "." ]
python
test
PyGithub/PyGithub
github/Branch.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/Branch.py#L287-L296
def get_admin_enforcement(self): """ :calls: `GET /repos/:owner/:repo/branches/:branch/protection/enforce_admins <https://developer.github.com/v3/repos/branches>`_ :rtype: bool """ headers, data = self._requester.requestJsonAndCheck( "GET", self.protection_url + "/enforce_admins" ) return data["enabled"]
[ "def", "get_admin_enforcement", "(", "self", ")", ":", "headers", ",", "data", "=", "self", ".", "_requester", ".", "requestJsonAndCheck", "(", "\"GET\"", ",", "self", ".", "protection_url", "+", "\"/enforce_admins\"", ")", "return", "data", "[", "\"enabled\"", ...
:calls: `GET /repos/:owner/:repo/branches/:branch/protection/enforce_admins <https://developer.github.com/v3/repos/branches>`_ :rtype: bool
[ ":", "calls", ":", "GET", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "branches", "/", ":", "branch", "/", "protection", "/", "enforce_admins", "<https", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "repos", "/", "...
python
train
hyperledger/sawtooth-core
validator/sawtooth_validator/journal/block_info_injector.py
https://github.com/hyperledger/sawtooth-core/blob/8cf473bc2207e51f02bd182d825158a57d72b098/validator/sawtooth_validator/journal/block_info_injector.py#L80-L102
def block_start(self, previous_block): """Returns an ordered list of batches to inject at the beginning of the block. Can also return None if no batches should be injected. Args: previous_block (Block): The previous block. Returns: A list of batches to inject. """ previous_header_bytes = previous_block.header previous_header = BlockHeader() previous_header.ParseFromString(previous_header_bytes) block_info = BlockInfo( block_num=previous_header.block_num, previous_block_id=previous_header.previous_block_id, signer_public_key=previous_header.signer_public_key, header_signature=previous_block.header_signature, timestamp=int(time.time())) return [self.create_batch(block_info)]
[ "def", "block_start", "(", "self", ",", "previous_block", ")", ":", "previous_header_bytes", "=", "previous_block", ".", "header", "previous_header", "=", "BlockHeader", "(", ")", "previous_header", ".", "ParseFromString", "(", "previous_header_bytes", ")", "block_inf...
Returns an ordered list of batches to inject at the beginning of the block. Can also return None if no batches should be injected. Args: previous_block (Block): The previous block. Returns: A list of batches to inject.
[ "Returns", "an", "ordered", "list", "of", "batches", "to", "inject", "at", "the", "beginning", "of", "the", "block", ".", "Can", "also", "return", "None", "if", "no", "batches", "should", "be", "injected", "." ]
python
train
dhondta/tinyscript
tinyscript/report/__init__.py
https://github.com/dhondta/tinyscript/blob/624a0718db698899e7bc3ba6ac694baed251e81d/tinyscript/report/__init__.py#L133-L143
def html(self, text=TEXT): """ Generate an HTML file from the report data. """ self.logger.debug("Generating the HTML report{}..." .format(["", " (text only)"][text])) html = [] for piece in self._pieces: if isinstance(piece, string_types): html.append(markdown2.markdown(piece, extras=["tables"])) elif isinstance(piece, Element): html.append(piece.html()) return "\n\n".join(html)
[ "def", "html", "(", "self", ",", "text", "=", "TEXT", ")", ":", "self", ".", "logger", ".", "debug", "(", "\"Generating the HTML report{}...\"", ".", "format", "(", "[", "\"\"", ",", "\" (text only)\"", "]", "[", "text", "]", ")", ")", "html", "=", "["...
Generate an HTML file from the report data.
[ "Generate", "an", "HTML", "file", "from", "the", "report", "data", "." ]
python
train
marcomusy/vtkplotter
vtkplotter/utils.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/utils.py#L90-L97
def vector(x, y=None, z=0.0): """Return a 3D numpy array representing a vector (of type `numpy.float64`). If `y` is ``None``, assume input is already in the form `[x,y,z]`. """ if y is None: # assume x is already [x,y,z] return np.array(x, dtype=np.float64) return np.array([x, y, z], dtype=np.float64)
[ "def", "vector", "(", "x", ",", "y", "=", "None", ",", "z", "=", "0.0", ")", ":", "if", "y", "is", "None", ":", "# assume x is already [x,y,z]", "return", "np", ".", "array", "(", "x", ",", "dtype", "=", "np", ".", "float64", ")", "return", "np", ...
Return a 3D numpy array representing a vector (of type `numpy.float64`). If `y` is ``None``, assume input is already in the form `[x,y,z]`.
[ "Return", "a", "3D", "numpy", "array", "representing", "a", "vector", "(", "of", "type", "numpy", ".", "float64", ")", "." ]
python
train
darvid/biome
src/biome/__init__.py
https://github.com/darvid/biome/blob/e1f1945165df9def31af42e5e13b623e1de97f01/src/biome/__init__.py#L204-L230
def get_path(self, name, default=None): """Retrieves an environment variable as a filesystem path. Requires the `pathlib`_ library if using Python <= 3.4. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: pathlib.Path: The environment variable as a ``pathlib.Path`` object. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. .. _pathlib: https://pypi.python.org/pypi/pathlib/ """ if name not in self: if default is not None: return default raise EnvironmentError.not_found(self._prefix, name) return pathlib.Path(self[name])
[ "def", "get_path", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "if", "name", "not", "in", "self", ":", "if", "default", "is", "not", "None", ":", "return", "default", "raise", "EnvironmentError", ".", "not_found", "(", "self", ".",...
Retrieves an environment variable as a filesystem path. Requires the `pathlib`_ library if using Python <= 3.4. Args: name (str): The case-insensitive, unprefixed variable name. default: If provided, a default value will be returned instead of throwing ``EnvironmentError``. Returns: pathlib.Path: The environment variable as a ``pathlib.Path`` object. Raises: EnvironmentError: If the environment variable does not exist, and ``default`` was not provided. .. _pathlib: https://pypi.python.org/pypi/pathlib/
[ "Retrieves", "an", "environment", "variable", "as", "a", "filesystem", "path", "." ]
python
train
mikedh/trimesh
trimesh/poses.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/poses.py#L158-L187
def _orient3dfast(plane, pd): """ Performs a fast 3D orientation test. Parameters ---------- plane: (3,3) float, three points in space that define a plane pd: (3,) float, a single point Returns ------- result: float, if greater than zero then pd is above the plane through the given three points, if less than zero then pd is below the given plane, and if equal to zero then pd is on the given plane. """ pa, pb, pc = plane adx = pa[0] - pd[0] bdx = pb[0] - pd[0] cdx = pc[0] - pd[0] ady = pa[1] - pd[1] bdy = pb[1] - pd[1] cdy = pc[1] - pd[1] adz = pa[2] - pd[2] bdz = pb[2] - pd[2] cdz = pc[2] - pd[2] return (adx * (bdy * cdz - bdz * cdy) + bdx * (cdy * adz - cdz * ady) + cdx * (ady * bdz - adz * bdy))
[ "def", "_orient3dfast", "(", "plane", ",", "pd", ")", ":", "pa", ",", "pb", ",", "pc", "=", "plane", "adx", "=", "pa", "[", "0", "]", "-", "pd", "[", "0", "]", "bdx", "=", "pb", "[", "0", "]", "-", "pd", "[", "0", "]", "cdx", "=", "pc", ...
Performs a fast 3D orientation test. Parameters ---------- plane: (3,3) float, three points in space that define a plane pd: (3,) float, a single point Returns ------- result: float, if greater than zero then pd is above the plane through the given three points, if less than zero then pd is below the given plane, and if equal to zero then pd is on the given plane.
[ "Performs", "a", "fast", "3D", "orientation", "test", "." ]
python
train
ethan92429/onshapepy
onshapepy/core/utils.py
https://github.com/ethan92429/onshapepy/blob/61dc7ccbdc6095fa6cc3b4a414e2f72d03d1c9df/onshapepy/core/utils.py#L77-L101
def parse_quantity(q): """ Parse an OnShape units definition Args: q: Returns: a string that can be converted to any other unit engine. >>> from onshapepy.core.utils import parse_quantity >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 1, 'key': 'METER'}], 'typeTag': ''} >>> parse_quantity(d) '0.1414213562373095*meter' >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 3, 'key': 'MILLIMETER'}], 'typeTag': ''} >>> parse_quantity(d) '0.1414213562373095*millimeter**3' """ units_s = str(q['value']) for u in q['unitToPower']: units_s = units_s + "*" + u['key'].lower() power = u['value'] if not power == 1: units_s = units_s + "**" + str(power) return units_s
[ "def", "parse_quantity", "(", "q", ")", ":", "units_s", "=", "str", "(", "q", "[", "'value'", "]", ")", "for", "u", "in", "q", "[", "'unitToPower'", "]", ":", "units_s", "=", "units_s", "+", "\"*\"", "+", "u", "[", "'key'", "]", ".", "lower", "("...
Parse an OnShape units definition Args: q: Returns: a string that can be converted to any other unit engine. >>> from onshapepy.core.utils import parse_quantity >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 1, 'key': 'METER'}], 'typeTag': ''} >>> parse_quantity(d) '0.1414213562373095*meter' >>> d = {'value': 0.1414213562373095, 'unitToPower': [{'value': 3, 'key': 'MILLIMETER'}], 'typeTag': ''} >>> parse_quantity(d) '0.1414213562373095*millimeter**3'
[ "Parse", "an", "OnShape", "units", "definition", "Args", ":", "q", ":" ]
python
train
6809/MC6809
MC6809/components/mc6809_base.py
https://github.com/6809/MC6809/blob/6ba2f5106df46689017b5d0b6d84d43b7ee6a240/MC6809/components/mc6809_base.py#L888-L911
def instruction_SUB(self, opcode, m, register): """ Subtracts the value in memory location M from the contents of a register. The C (carry) bit represents a borrow and is set to the inverse of the resulting binary carry. source code forms: SUBA P; SUBB P; SUBD P CC bits "HNZVC": uaaaa """ r = register.value r_new = r - m register.set(r_new) # log.debug("$%x SUB8 %s: $%x - $%x = $%x (dez.: %i - %i = %i)" % ( # self.program_counter, register.name, # r, m, r_new, # r, m, r_new, # )) self.clear_NZVC() if register.WIDTH == 8: self.update_NZVC_8(r, m, r_new) else: assert register.WIDTH == 16 self.update_NZVC_16(r, m, r_new)
[ "def", "instruction_SUB", "(", "self", ",", "opcode", ",", "m", ",", "register", ")", ":", "r", "=", "register", ".", "value", "r_new", "=", "r", "-", "m", "register", ".", "set", "(", "r_new", ")", "# log.debug(\"$%x SUB8 %s: $%x - $%x = $%x (dez.: %i ...
Subtracts the value in memory location M from the contents of a register. The C (carry) bit represents a borrow and is set to the inverse of the resulting binary carry. source code forms: SUBA P; SUBB P; SUBD P CC bits "HNZVC": uaaaa
[ "Subtracts", "the", "value", "in", "memory", "location", "M", "from", "the", "contents", "of", "a", "register", ".", "The", "C", "(", "carry", ")", "bit", "represents", "a", "borrow", "and", "is", "set", "to", "the", "inverse", "of", "the", "resulting", ...
python
train
pypa/pipenv
pipenv/patched/notpip/_internal/cli/autocompletion.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/patched/notpip/_internal/cli/autocompletion.py#L125-L152
def auto_complete_paths(current, completion_type): """If ``completion_type`` is ``file`` or ``path``, list all regular files and directories starting with ``current``; otherwise only list directories starting with ``current``. :param current: The word to be completed :param completion_type: path completion type(`file`, `path` or `dir`)i :return: A generator of regular files and/or directories """ directory, filename = os.path.split(current) current_path = os.path.abspath(directory) # Don't complete paths if they can't be accessed if not os.access(current_path, os.R_OK): return filename = os.path.normcase(filename) # list all files that start with ``filename`` file_list = (x for x in os.listdir(current_path) if os.path.normcase(x).startswith(filename)) for f in file_list: opt = os.path.join(current_path, f) comp_file = os.path.normcase(os.path.join(directory, f)) # complete regular files when there is not ``<dir>`` after option # complete directories when there is ``<file>``, ``<path>`` or # ``<dir>``after option if completion_type != 'dir' and os.path.isfile(opt): yield comp_file elif os.path.isdir(opt): yield os.path.join(comp_file, '')
[ "def", "auto_complete_paths", "(", "current", ",", "completion_type", ")", ":", "directory", ",", "filename", "=", "os", ".", "path", ".", "split", "(", "current", ")", "current_path", "=", "os", ".", "path", ".", "abspath", "(", "directory", ")", "# Don't...
If ``completion_type`` is ``file`` or ``path``, list all regular files and directories starting with ``current``; otherwise only list directories starting with ``current``. :param current: The word to be completed :param completion_type: path completion type(`file`, `path` or `dir`)i :return: A generator of regular files and/or directories
[ "If", "completion_type", "is", "file", "or", "path", "list", "all", "regular", "files", "and", "directories", "starting", "with", "current", ";", "otherwise", "only", "list", "directories", "starting", "with", "current", "." ]
python
train
tgbugs/ontquery
ontquery/plugins/interlex_client.py
https://github.com/tgbugs/ontquery/blob/bcf4863cb2bf221afe2b093c5dc7da1377300041/ontquery/plugins/interlex_client.py#L81-L89
def get(self, url: str) -> List[dict]: """ Requests data from database """ response = requests.get( url, headers = {'Content-type': 'application/json'}, auth = ('scicrunch', 'perl22(query)') # for test2.scicrunch.org ) output = self.process_response(response) return output
[ "def", "get", "(", "self", ",", "url", ":", "str", ")", "->", "List", "[", "dict", "]", ":", "response", "=", "requests", ".", "get", "(", "url", ",", "headers", "=", "{", "'Content-type'", ":", "'application/json'", "}", ",", "auth", "=", "(", "'s...
Requests data from database
[ "Requests", "data", "from", "database" ]
python
train
opendatateam/udata
udata/theme/__init__.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/theme/__init__.py#L141-L148
def render(template, **context):
    '''
    Render a template with uData frontend specifics

    * Theme
    '''
    active_theme = get_theme(current_app.config['THEME'])
    return render_theme_template(active_theme, template, **context)
[ "def", "render", "(", "template", ",", "*", "*", "context", ")", ":", "theme", "=", "current_app", ".", "config", "[", "'THEME'", "]", "return", "render_theme_template", "(", "get_theme", "(", "theme", ")", ",", "template", ",", "*", "*", "context", ")" ...
Render a template with uData frontend specifics * Theme
[ "Render", "a", "template", "with", "uData", "frontend", "specifics" ]
python
train
CEA-COSMIC/ModOpt
modopt/opt/proximity.py
https://github.com/CEA-COSMIC/ModOpt/blob/019b189cb897cbb4d210c44a100daaa08468830c/modopt/opt/proximity.py#L356-L395
def _check_operators(self, operators): """ Check Inputs This method cheks that the input operators and weights are correctly formatted Parameters ---------- operators : list, tuple or np.ndarray List of linear operator class instances Returns ------- np.array operators Raises ------ TypeError For invalid input type """ if not isinstance(operators, (list, tuple, np.ndarray)): raise TypeError('Invalid input type, operators must be a list, ' 'tuple or numpy array.') operators = np.array(operators) if not operators.size: raise ValueError('Operator list is empty.') for operator in operators: if not hasattr(operator, 'op'): raise ValueError('Operators must contain "op" method.') if not hasattr(operator, 'cost'): raise ValueError('Operators must contain "cost" method.') operator.op = check_callable(operator.op) operator.cost = check_callable(operator.cost) return operators
[ "def", "_check_operators", "(", "self", ",", "operators", ")", ":", "if", "not", "isinstance", "(", "operators", ",", "(", "list", ",", "tuple", ",", "np", ".", "ndarray", ")", ")", ":", "raise", "TypeError", "(", "'Invalid input type, operators must be a list...
Check Inputs This method checks that the input operators and weights are correctly formatted Parameters ---------- operators : list, tuple or np.ndarray List of linear operator class instances Returns ------- np.array operators Raises ------ TypeError For invalid input type
[ "Check", "Inputs" ]
python
train
saltstack/salt
salt/modules/file.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/file.py#L6580-L6657
def open_files(by_pid=False):
    '''
    Return a list of all physical open files on the system.

    by_pid=False returns a dict mapping each open file path to the sorted
    list of PIDs holding it; by_pid=True returns a dict mapping each PID
    to the sorted list of files it has open.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    '''
    # First we collect valid PIDs
    pids = {}
    procfs = os.listdir('/proc/')
    for pfile in procfs:
        try:
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass

    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = '/proc/{0}'.format(pid)
        try:
            tids = os.listdir('{0}/task'.format(ppath))
        except OSError:
            continue

        # Collect the names of all of the file descriptors
        fd_ = []

        # Processes can exit between enumerating /proc and reading their
        # entries, so every listdir below must tolerate OSError.
        try:
            for fpath in os.listdir('{0}/fd'.format(ppath)):
                fd_.append('{0}/fd/{1}'.format(ppath, fpath))
        except OSError:
            # The process disappeared while we were inspecting it
            continue

        for tid in tids:
            try:
                fd_.append(
                    os.path.realpath('{0}/task/{1}/exe'.format(ppath, tid))
                )
            except OSError:
                continue

            try:
                for tpath in os.listdir('{0}/task/{1}/fd'.format(ppath, tid)):
                    fd_.append('{0}/task/{1}/fd/{2}'.format(ppath, tid, tpath))
            except OSError:
                # The task exited between the listings above and now
                continue
        fd_ = sorted(set(fd_))

        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue

            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))

            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))

    if by_pid:
        return pids
    return files
[ "def", "open_files", "(", "by_pid", "=", "False", ")", ":", "# First we collect valid PIDs", "pids", "=", "{", "}", "procfs", "=", "os", ".", "listdir", "(", "'/proc/'", ")", "for", "pfile", "in", "procfs", ":", "try", ":", "pids", "[", "int", "(", "pf...
Return a list of all physical open files on the system. CLI Examples: .. code-block:: bash salt '*' file.open_files salt '*' file.open_files by_pid=True
[ "Return", "a", "list", "of", "all", "physical", "open", "files", "on", "the", "system", "." ]
python
train
koordinates/python-client
koordinates/base.py
https://github.com/koordinates/python-client/blob/f3dc7cd164f5a9499b2454cd1d4516e9d4b3c252/koordinates/base.py#L146-L153
def _update_range(self, response): """ Update the query count property from the `X-Resource-Range` response header """ header_value = response.headers.get('x-resource-range', '') m = re.match(r'\d+-\d+/(\d+)$', header_value) if m: self._count = int(m.group(1)) else: self._count = None
[ "def", "_update_range", "(", "self", ",", "response", ")", ":", "header_value", "=", "response", ".", "headers", ".", "get", "(", "'x-resource-range'", ",", "''", ")", "m", "=", "re", ".", "match", "(", "r'\\d+-\\d+/(\\d+)$'", ",", "header_value", ")", "if...
Update the query count property from the `X-Resource-Range` response header
[ "Update", "the", "query", "count", "property", "from", "the", "X", "-", "Resource", "-", "Range", "response", "header" ]
python
train
CityOfZion/neo-python
neo/Core/TX/EnrollmentTransaction.py
https://github.com/CityOfZion/neo-python/blob/fe90f62e123d720d4281c79af0598d9df9e776fb/neo/Core/TX/EnrollmentTransaction.py#L29-L42
def DeserializeExclusiveData(self, reader):
    """
    Deserialize full object.

    Args:
        reader (neo.IO.BinaryReader):

    Raises:
        Exception: If the version read is incorrect.
    """
    # Use equality, not identity: ``is not 0`` only works by accident via
    # CPython small-int caching and is a SyntaxWarning on Python 3.8+.
    if self.Version != 0:
        raise Exception('Invalid format')

    self.PublicKey = ECDSA.Deserialize_Secp256r1(reader)
[ "def", "DeserializeExclusiveData", "(", "self", ",", "reader", ")", ":", "if", "self", ".", "Version", "is", "not", "0", ":", "raise", "Exception", "(", "'Invalid format'", ")", "self", ".", "PublicKey", "=", "ECDSA", ".", "Deserialize_Secp256r1", "(", "read...
Deserialize full object. Args: reader (neo.IO.BinaryReader): Raises: Exception: If the version read is incorrect.
[ "Deserialize", "full", "object", "." ]
python
train
python-wink/python-wink
src/pywink/devices/cloud_clock.py
https://github.com/python-wink/python-wink/blob/cf8bdce8c6518f30b91b23aa7aa32e89c2ce48da/src/pywink/devices/cloud_clock.py#L99-L120
def _update_state_from_response(self, response_json): """ :param response_json: the json obj returned from query :return: """ if 'data' in response_json and response_json['data']['object_type'] == "cloud_clock": cloud_clock = response_json.get('data') if cloud_clock is None: return False alarms = cloud_clock.get('alarms') for alarm in alarms: if alarm.get('object_id') == self.object_id(): self.json_state = alarm return True return False if 'data' in response_json: alarm = response_json.get('data') self.json_state = alarm return True self.json_state = response_json return True
[ "def", "_update_state_from_response", "(", "self", ",", "response_json", ")", ":", "if", "'data'", "in", "response_json", "and", "response_json", "[", "'data'", "]", "[", "'object_type'", "]", "==", "\"cloud_clock\"", ":", "cloud_clock", "=", "response_json", ".",...
:param response_json: the json obj returned from query :return:
[ ":", "param", "response_json", ":", "the", "json", "obj", "returned", "from", "query", ":", "return", ":" ]
python
train
AnalogJ/lexicon
lexicon/providers/hetzner.py
https://github.com/AnalogJ/lexicon/blob/9330b871988753cad44fe2876a217b4c67b1fa0e/lexicon/providers/hetzner.py#L438-L455
def _get_nameservers(domain):
    """
    Looks for domain nameservers and returns the IPs of the nameservers
    as a list. The list is empty, if no nameservers were found. Needed
    associated domain zone name for lookup.
    """
    nameservers = []
    for ns_rdtype in ('SOA', 'NS'):
        for ns_record in Provider._dns_lookup(domain, ns_rdtype):
            # The host name is the first whitespace-separated field.
            ns_host = ns_record.to_text().split(' ')[0]
            for ip_rdtype in ('A', 'AAAA'):
                for ip_record in Provider._dns_lookup(ns_host, ip_rdtype):
                    address = ip_record.to_text()
                    if address not in nameservers:
                        nameservers.append(address)
    LOGGER.debug('DNS Lookup => %s IN NS %s', domain, ' '.join(nameservers))
    return nameservers
[ "def", "_get_nameservers", "(", "domain", ")", ":", "nameservers", "=", "[", "]", "rdtypes_ns", "=", "[", "'SOA'", ",", "'NS'", "]", "rdtypes_ip", "=", "[", "'A'", ",", "'AAAA'", "]", "for", "rdtype_ns", "in", "rdtypes_ns", ":", "for", "rdata_ns", "in", ...
Looks for domain nameservers and returns the IPs of the nameservers as a list. The list is empty, if no nameservers were found. Needed associated domain zone name for lookup.
[ "Looks", "for", "domain", "nameservers", "and", "returns", "the", "IPs", "of", "the", "nameservers", "as", "a", "list", ".", "The", "list", "is", "empty", "if", "no", "nameservers", "were", "found", ".", "Needed", "associated", "domain", "zone", "name", "f...
python
train
niolabs/python-xbee
examples/alarm.py
https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/examples/alarm.py#L177-L195
def bed_occupied(self):
    """
    bed_occupied: None -> boolean

    Determines whether the bed is currently occupied by requesting
    data from the remote XBee and comparing the analog value
    with a threshold.
    """
    # Ask the remote device to start streaming samples
    self._set_send_samples(True)

    while True:
        frame = self.hw.wait_read_frame()
        sample = frame['samples'][0]
        if 'adc-0' in sample:
            # Stop receiving samples from the remote device
            self._set_send_samples(False)
            return sample['adc-0'] > XBeeAlarm.DETECT_THRESH
[ "def", "bed_occupied", "(", "self", ")", ":", "# Receive samples from the remote device", "self", ".", "_set_send_samples", "(", "True", ")", "while", "True", ":", "packet", "=", "self", ".", "hw", ".", "wait_read_frame", "(", ")", "if", "'adc-0'", "in", "pack...
bed_occupied: None -> boolean Determines whether the bed is currently occupied by requesting data from the remote XBee and comparing the analog value with a threshold.
[ "bed_occupied", ":", "None", "-", ">", "boolean" ]
python
train
aequitas/python-rflink
rflink/protocol.py
https://github.com/aequitas/python-rflink/blob/46759ce8daf95cfc7cdb608ae17bc5501be9f6d8/rflink/protocol.py#L215-L231
def _handle_packet(self, packet):
    """Event specific packet handling logic.

    Break packet into events and fires configured event callback or
    nicely prints events for console.
    """
    for event in packet_events(packet):
        if self.ignore_event(event['id']):
            log.debug('ignoring event with id: %s', event)
            continue

        log.debug('got event: %s', event)
        callback = self.event_callback
        if callback:
            callback(event)
        else:
            self.handle_event(event)
[ "def", "_handle_packet", "(", "self", ",", "packet", ")", ":", "events", "=", "packet_events", "(", "packet", ")", "for", "event", "in", "events", ":", "if", "self", ".", "ignore_event", "(", "event", "[", "'id'", "]", ")", ":", "log", ".", "debug", ...
Event specific packet handling logic. Break packet into events and fires configured event callback or nicely prints events for console.
[ "Event", "specific", "packet", "handling", "logic", "." ]
python
train
ev3dev/ev3dev-lang-python
ev3dev2/sensor/lego.py
https://github.com/ev3dev/ev3dev-lang-python/blob/afc98d35004b533dc161a01f7c966e78607d7c1e/ev3dev2/sensor/lego.py#L976-L981
def reflected_light_intensity(self):
    """
    A measurement of the reflected light intensity, as a percentage.
    """
    self._ensure_mode(self.MODE_REFLECT)
    raw = self.value(0)
    return raw * self._scale('REFLECT')
[ "def", "reflected_light_intensity", "(", "self", ")", ":", "self", ".", "_ensure_mode", "(", "self", ".", "MODE_REFLECT", ")", "return", "self", ".", "value", "(", "0", ")", "*", "self", ".", "_scale", "(", "'REFLECT'", ")" ]
A measurement of the reflected light intensity, as a percentage.
[ "A", "measurement", "of", "the", "reflected", "light", "intensity", "as", "a", "percentage", "." ]
python
train