repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
marshmallow-code/apispec-webframeworks
src/apispec_webframeworks/tornado.py
https://github.com/marshmallow-code/apispec-webframeworks/blob/21b0b4135c073d2ada47a4228377e63bc03ac7f9/src/apispec_webframeworks/tornado.py#L59-L81
def tornadopath2openapi(urlspec, method):
    """Convert Tornado URLSpec to OpenAPI-compliant path.

    :param urlspec: spec whose regex-style path template is converted
    :type urlspec: URLSpec
    :param method: Handler http method
    :type method: function
    """
    if sys.version_info >= (3, 3):
        arg_names = list(inspect.signature(method).parameters.keys())[1:]
    else:
        # Tornado coroutines wrap the real handler on Python 2; unwrap
        # before inspecting the argument list.
        if getattr(method, '__tornado_coroutine__', False):
            method = method.__wrapped__
        arg_names = inspect.getargspec(method).args[1:]
    # One OpenAPI {placeholder}, named after each handler argument, per
    # positional capture group in the path template.
    placeholders = tuple('{' + name + '}' for name in arg_names)
    try:
        template = urlspec.matcher._path
    except AttributeError:  # tornado<4.5
        template = urlspec._path
    path = template % placeholders
    # Trim trailing regex artifacts ('/', '?', '*'), but never on a
    # root-level path.
    return path.rstrip('/?*') if path.count('/') > 1 else path
[ "def", "tornadopath2openapi", "(", "urlspec", ",", "method", ")", ":", "if", "sys", ".", "version_info", ">=", "(", "3", ",", "3", ")", ":", "args", "=", "list", "(", "inspect", ".", "signature", "(", "method", ")", ".", "parameters", ".", "keys", "(...
Convert Tornado URLSpec to OpenAPI-compliant path. :param urlspec: :type urlspec: URLSpec :param method: Handler http method :type method: function
[ "Convert", "Tornado", "URLSpec", "to", "OpenAPI", "-", "compliant", "path", "." ]
python
train
36.478261
fracpete/python-weka-wrapper3
python/weka/core/database.py
https://github.com/fracpete/python-weka-wrapper3/blob/d850ab1bdb25fbd5a8d86e99f34a397975425838/python/weka/core/database.py#L182-L195
def retrieve_instances(self, query=None):
    """
    Executes either the supplied query or the one set via options
    (or the 'query' property).

    :param query: query to execute if not the currently set one
    :type query: str
    :return: the generated data
    :rtype: Instances
    """
    if query is None:
        data = javabridge.call(
            self.jobject, "retrieveInstances",
            "()Lweka/core/Instances;")
    else:
        # Bug fix: the one-argument overload was invoked without the
        # query string, so the (Ljava/lang/String;) JNI signature could
        # never be satisfied. Pass the query through.
        data = javabridge.call(
            self.jobject, "retrieveInstances",
            "(Ljava/lang/String;)Lweka/core/Instances;", query)
    return Instances(data)
[ "def", "retrieve_instances", "(", "self", ",", "query", "=", "None", ")", ":", "if", "query", "is", "None", ":", "data", "=", "javabridge", ".", "call", "(", "self", ".", "jobject", ",", "\"retrieveInstances\"", ",", "\"()Lweka/core/Instances;\"", ")", "else...
Executes either the supplied query or the one set via options (or the 'query' property). :param query: query to execute if not the currently set one :type query: str :return: the generated data :rtype: Instances
[ "Executes", "either", "the", "supplied", "query", "or", "the", "one", "set", "via", "options", "(", "or", "the", "query", "property", ")", "." ]
python
train
42.071429
cackharot/suds-py3
suds/xsd/doctor.py
https://github.com/cackharot/suds-py3/blob/7387ec7806e9be29aad0a711bea5cb3c9396469c/suds/xsd/doctor.py#L142-L159
def apply(self, root):
    """
    Apply the import (rule) to the specified schema.
    If the schema does not already contain an import for the
    I{namespace} specified here, it is added.
    @param root: A schema root.
    @type root: L{Element}
    """
    # Skip schemas the filter rejects, and schemas that already import
    # this namespace (filter is checked first, as before).
    if not self.filter.match(root, self.ns) or self.exists(root):
        return
    node = Element('import', ns=self.xsdns)
    node.set('namespace', self.ns)
    if self.location is not None:
        node.set('schemaLocation', self.location)
    log.debug('inserting: %s', node)
    root.insert(node)
[ "def", "apply", "(", "self", ",", "root", ")", ":", "if", "not", "self", ".", "filter", ".", "match", "(", "root", ",", "self", ".", "ns", ")", ":", "return", "if", "self", ".", "exists", "(", "root", ")", ":", "return", "node", "=", "Element", ...
Apply the import (rule) to the specified schema. If the schema does not already contain an import for the I{namespace} specified here, it is added. @param root: A schema root. @type root: L{Element}
[ "Apply", "the", "import", "(", "rule", ")", "to", "the", "specified", "schema", ".", "If", "the", "schema", "does", "not", "already", "contain", "an", "import", "for", "the", "I", "{", "namespace", "}", "specified", "here", "it", "is", "added", "." ]
python
train
35.055556
jashort/SmartFileSorter
smartfilesorter/ruleset.py
https://github.com/jashort/SmartFileSorter/blob/77faf09e5a737da93e16e71a64707366b8307910/smartfilesorter/ruleset.py#L67-L82
def add_rule(self, config_name, value, plugins, destination):
    """
    Adds a rule. Use add_action_rule or add_match_rule instead

    :param config_name: config_name of the plugin to add
    :param value: configuration information for the rule
    :param plugins: list of all available plugins
    :param destination: list to append plugin to (self.action_rules or
                        self.match_rules)
    :return:
    """
    # (Doc fix: the old docstring documented a non-existent
    # ``rule_wrapper`` parameter.)
    if config_name in plugins:
        destination.append(plugins[config_name](value))
    else:
        # Build the message once so the log entry and the exception agree.
        msg = "Plugin with config_name {0} not found".format(config_name)
        self.logger.error(msg)
        raise IndexError(msg)
[ "def", "add_rule", "(", "self", ",", "config_name", ",", "value", ",", "plugins", ",", "destination", ")", ":", "if", "config_name", "in", "plugins", ":", "rule", "=", "plugins", "[", "config_name", "]", "(", "value", ")", "destination", ".", "append", "...
Adds a rule. Use add_action_rule or add_match_rule instead :param rule_wrapper: Rule wrapper class (ActionRule or MatchRule) :param config_name: config_name of the plugin to add :param value: configuration information for the rule :param plugins: list of all available plugins :param destination: list to append plugin to (self.action_rules or self.match_rules) :return:
[ "Adds", "a", "rule", ".", "Use", "add_action_rule", "or", "add_match_rule", "instead", ":", "param", "rule_wrapper", ":", "Rule", "wrapper", "class", "(", "ActionRule", "or", "MatchRule", ")", ":", "param", "config_name", ":", "config_name", "of", "the", "plug...
python
train
50.6875
gem/oq-engine
openquake/hazardlib/geo/line.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/geo/line.py#L205-L243
def resample_to_num_points(self, num_points):
    """
    Resample the line to a specified number of points.

    :param num_points:
        Integer number of points the resulting line should have.
    :returns:
        A new line with that many points as requested.
    """
    assert len(self.points) > 1, "can not resample the line of one point"
    # Target spacing between consecutive resampled points along the line.
    section_length = self.get_length() / (num_points - 1)
    resampled_points = [self.points[0]]
    # Index of the original segment currently being walked.
    segment = 0
    # Cumulative length of original segments consumed so far.
    acc_length = 0
    last_segment_length = 0
    for i in range(num_points - 1):
        # Distance from the line start to the (i + 1)-th resampled point.
        tot_length = (i + 1) * section_length
        # Advance through original segments until tot_length falls inside
        # the current segment (or we run out of segments).
        while tot_length > acc_length and segment < len(self.points) - 1:
            last_segment_length = self.points[segment].distance(
                self.points[segment + 1]
            )
            acc_length += last_segment_length
            segment += 1
        p1, p2 = self.points[segment - 1:segment + 1]
        # Distance of the target point from p1, measured along p1 -> p2.
        offset = tot_length - (acc_length - last_segment_length)
        if offset < 1e-5:
            # forward geodetic transformations for very small distances
            # are very inefficient (and also unneeded). if target point
            # is just 1 cm away from original (non-resampled) line vertex,
            # don't even bother doing geodetic calculations.
            resampled = p1
        else:
            resampled = p1.equally_spaced_points(p2, offset)[1]
        resampled_points.append(resampled)
    return Line(resampled_points)
[ "def", "resample_to_num_points", "(", "self", ",", "num_points", ")", ":", "assert", "len", "(", "self", ".", "points", ")", ">", "1", ",", "\"can not resample the line of one point\"", "section_length", "=", "self", ".", "get_length", "(", ")", "/", "(", "num...
Resample the line to a specified number of points. :param num_points: Integer number of points the resulting line should have. :returns: A new line with that many points as requested.
[ "Resample", "the", "line", "to", "a", "specified", "number", "of", "points", "." ]
python
train
40.025641
dwavesystems/dwave_networkx
dwave_networkx/algorithms/elimination_ordering.py
https://github.com/dwavesystems/dwave_networkx/blob/9ea1223ddbc7e86db2f90b8b23e250e6642c3d68/dwave_networkx/algorithms/elimination_ordering.py#L703-L727
def _theorem5p4(adj, ub): """By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them. """ new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: # already an edge continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v)) while new_edges: for u, v in new_edges: adj[u].add(v) adj[v].add(u) new_edges = set() for u, v in itertools.combinations(adj, 2): if u in adj[v]: continue if len(adj[u].intersection(adj[v])) > ub: new_edges.add((u, v))
[ "def", "_theorem5p4", "(", "adj", ",", "ub", ")", ":", "new_edges", "=", "set", "(", ")", "for", "u", ",", "v", "in", "itertools", ".", "combinations", "(", "adj", ",", "2", ")", ":", "if", "u", "in", "adj", "[", "v", "]", ":", "# already an edge...
By Theorem 5.4, if any two vertices have ub + 1 common neighbors then we can add an edge between them.
[ "By", "Theorem", "5", ".", "4", "if", "any", "two", "vertices", "have", "ub", "+", "1", "common", "neighbors", "then", "we", "can", "add", "an", "edge", "between", "them", "." ]
python
train
27.32
openstack/horizon
openstack_dashboard/api/keystone.py
https://github.com/openstack/horizon/blob/5601ea9477323e599d9b766fcac1f8be742935b2/openstack_dashboard/api/keystone.py#L843-L851
def remove_tenant_user_role(request, project=None, user=None, role=None,
                            group=None, domain=None):
    """Removes a given single role for a user from a tenant."""
    manager = keystoneclient(request, admin=True).roles
    # Keystone v2 has a dedicated remove call; v3 uses the generic
    # revoke API with keyword scoping.
    if VERSIONS.active < 3:
        return manager.remove_user_role(user, role, project)
    return manager.revoke(role, user=user, project=project,
                          group=group, domain=domain)
[ "def", "remove_tenant_user_role", "(", "request", ",", "project", "=", "None", ",", "user", "=", "None", ",", "role", "=", "None", ",", "group", "=", "None", ",", "domain", "=", "None", ")", ":", "manager", "=", "keystoneclient", "(", "request", ",", "...
Removes a given single role for a user from a tenant.
[ "Removes", "a", "given", "single", "role", "for", "a", "user", "from", "a", "tenant", "." ]
python
train
51
wummel/linkchecker
linkcheck/fileutil.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/fileutil.py#L176-L184
def has_changed (filename):
    """Check if filename has changed since the last check. If this is
    the first check, assume the file is changed."""
    key = os.path.abspath(filename)
    mtime = get_mtime(key)
    if key in _mtime_cache:
        # Seen before: changed iff the mtime moved forward.
        return mtime > _mtime_cache[key]
    # First time we see this file: remember its mtime and report changed.
    _mtime_cache[key] = mtime
    return True
[ "def", "has_changed", "(", "filename", ")", ":", "key", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "mtime", "=", "get_mtime", "(", "key", ")", "if", "key", "not", "in", "_mtime_cache", ":", "_mtime_cache", "[", "key", "]", "=", "mt...
Check if filename has changed since the last check. If this is the first check, assume the file is changed.
[ "Check", "if", "filename", "has", "changed", "since", "the", "last", "check", ".", "If", "this", "is", "the", "first", "check", "assume", "the", "file", "is", "changed", "." ]
python
train
36.333333
crackinglandia/pype32
tools/readpe.py
https://github.com/crackinglandia/pype32/blob/192fd14dfc0dd36d953739a81c17fbaf5e3d6076/tools/readpe.py#L64-L70
def showFileHeaderData(peInstance):
    """ Prints IMAGE_FILE_HEADER fields.

    :param peInstance: parsed PE object exposing ntHeaders.fileHeader.
    """
    # NOTE: Python 2 print statements -- this tool predates Python 3.
    fileHeaderFields = peInstance.ntHeaders.fileHeader.getFields()
    print "[+] IMAGE_FILE_HEADER values:\n"
    # Render every header field as an 8-digit hex value.
    for field in fileHeaderFields:
        print "--> %s = 0x%08x" % (field, fileHeaderFields[field].value)
[ "def", "showFileHeaderData", "(", "peInstance", ")", ":", "fileHeaderFields", "=", "peInstance", ".", "ntHeaders", ".", "fileHeader", ".", "getFields", "(", ")", "print", "\"[+] IMAGE_FILE_HEADER values:\\n\"", "for", "field", "in", "fileHeaderFields", ":", "print", ...
Prints IMAGE_FILE_HEADER fields.
[ "Prints", "IMAGE_FILE_HEADER", "fields", "." ]
python
train
43.285714
kata198/AdvancedHTMLParser
AdvancedHTMLParser/Tags.py
https://github.com/kata198/AdvancedHTMLParser/blob/06aeea5d8e2ea86e155aae0fc237623d3e9b7f9d/AdvancedHTMLParser/Tags.py#L1200-L1220
def getAllChildNodes(self):
    '''
        getAllChildNodes - Gets all the children, and their children,
           and their children, and so on, all the way to the end, as a
           TagCollection.

           Use .childNodes for a regular (direct-children-only) list.

        @return TagCollection<AdvancedTag> - A TagCollection of all
            children (and their children, recursively)
    '''
    collected = TagCollection()
    for directChild in self.children:
        # Each direct child first, then everything beneath it.
        collected.append(directChild)
        collected += directChild.getAllChildNodes()
    return collected
[ "def", "getAllChildNodes", "(", "self", ")", ":", "ret", "=", "TagCollection", "(", ")", "# Scan all the children of this node", "for", "child", "in", "self", ".", "children", ":", "# Append each child", "ret", ".", "append", "(", "child", ")", "# Append children'...
getAllChildNodes - Gets all the children, and their children, and their children, and so on, all the way to the end as a TagCollection. Use .childNodes for a regular list @return TagCollection<AdvancedTag> - A TagCollection of all children (and their children recursive)
[ "getAllChildNodes", "-", "Gets", "all", "the", "children", "and", "their", "children", "and", "their", "children", "and", "so", "on", "all", "the", "way", "to", "the", "end", "as", "a", "TagCollection", ".", "Use", ".", "childNodes", "for", "a", "regular",...
python
train
31.666667
secdev/scapy
scapy/utils.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/utils.py#L72-L85
def get_temp_dir(keep=False):
    """Creates a temporary directory, and returns its full path.

    (Doc fix: the old docstring claimed a temporary *file* was created;
    the code calls ``tempfile.mkdtemp`` and makes a directory.)

    :param keep: If False (default), the directory will be recursively
        deleted when Scapy exits.
    :return: A full path to a temporary directory.
    """
    dname = tempfile.mkdtemp(prefix="scapy")
    if not keep:
        # Track the directory so Scapy can delete it at exit.
        conf.temp_files.append(dname)
    return dname
[ "def", "get_temp_dir", "(", "keep", "=", "False", ")", ":", "dname", "=", "tempfile", ".", "mkdtemp", "(", "prefix", "=", "\"scapy\"", ")", "if", "not", "keep", ":", "conf", ".", "temp_files", ".", "append", "(", "dname", ")", "return", "dname" ]
Creates a temporary file, and returns its name. :param keep: If False (default), the directory will be recursively deleted when Scapy exits. :return: A full path to a temporary directory.
[ "Creates", "a", "temporary", "file", "and", "returns", "its", "name", "." ]
python
train
26.071429
hosford42/xcs
xcs/framework.py
https://github.com/hosford42/xcs/blob/183bdd0dd339e19ded3be202f86e1b38bdb9f1e5/xcs/framework.py#L430-L451
def _compute_prediction(self): """Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None """ total_weight = 0 total_prediction = 0 for rule in self._rules.values(): total_weight += rule.prediction_weight total_prediction += (rule.prediction * rule.prediction_weight) self._prediction = total_prediction / (total_weight or 1) self._prediction_weight = total_weight
[ "def", "_compute_prediction", "(", "self", ")", ":", "total_weight", "=", "0", "total_prediction", "=", "0", "for", "rule", "in", "self", ".", "_rules", ".", "values", "(", ")", ":", "total_weight", "+=", "rule", ".", "prediction_weight", "total_prediction", ...
Compute the combined prediction and prediction weight for this action set. The combined prediction is the weighted average of the individual predictions of the classifiers. The combined prediction weight is the sum of the individual prediction weights of the classifiers. Usage: Do not call this method directly. Use the prediction and/or prediction_weight properties instead. Arguments: None Return: None
[ "Compute", "the", "combined", "prediction", "and", "prediction", "weight", "for", "this", "action", "set", ".", "The", "combined", "prediction", "is", "the", "weighted", "average", "of", "the", "individual", "predictions", "of", "the", "classifiers", ".", "The",...
python
train
40.363636
odlgroup/odl
odl/solvers/smooth/newton.py
https://github.com/odlgroup/odl/blob/b8443f6aca90e191ba36c91d32253c5a36249a6c/odl/solvers/smooth/newton.py#L21-L71
def _bfgs_direction(s, y, x, hessinv_estimate=None):
    r"""Compute ``Hn^-1(x)`` for the L-BFGS method.

    Parameters
    ----------
    s : sequence of `LinearSpaceElement`
        The ``s`` coefficients in the BFGS update, see Notes.
    y : sequence of `LinearSpaceElement`
        The ``y`` coefficients in the BFGS update, see Notes.
    x : `LinearSpaceElement`
        Point in which to evaluate the product.
    hessinv_estimate : `Operator`, optional
        Initial estimate of the hessian ``H0^-1``.

    Returns
    -------
    r : ``x.space`` element
        The result of ``Hn^-1(x)``.

    Notes
    -----
    :math:`H_n^{-1}` is defined recursively as

    .. math::
        H_{n+1}^{-1} =
        \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right)
        H_{n}^{-1}
        \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) +
        \frac{s_n s_n^T}{y_n^T \, s_n}

    With :math:`H_0^{-1}` given by ``hess_estimate``.
    """
    assert len(s) == len(y)

    r = x.copy()
    alphas = np.zeros(len(s))
    rhos = np.zeros(len(s))

    # First (backward) pass of the classic two-loop recursion.
    for i in reversed(range(len(s))):
        rhos[i] = 1.0 / y[i].inner(s[i])
        alphas[i] = rhos[i] * (s[i].inner(r))
        # r <- r - alphas[i] * y[i]
        r.lincomb(1, r, -alphas[i], y[i])
    # Apply the initial inverse-Hessian estimate H0^-1, if one is given.
    if hessinv_estimate is not None:
        r = hessinv_estimate(r)
    # Second (forward) pass.
    for i in range(len(s)):
        beta = rhos[i] * (y[i].inner(r))
        # r <- r + (alphas[i] - beta) * s[i]
        r.lincomb(1, r, alphas[i] - beta, s[i])
    return r
[ "def", "_bfgs_direction", "(", "s", ",", "y", ",", "x", ",", "hessinv_estimate", "=", "None", ")", ":", "assert", "len", "(", "s", ")", "==", "len", "(", "y", ")", "r", "=", "x", ".", "copy", "(", ")", "alphas", "=", "np", ".", "zeros", "(", ...
r"""Compute ``Hn^-1(x)`` for the L-BFGS method. Parameters ---------- s : sequence of `LinearSpaceElement` The ``s`` coefficients in the BFGS update, see Notes. y : sequence of `LinearSpaceElement` The ``y`` coefficients in the BFGS update, see Notes. x : `LinearSpaceElement` Point in which to evaluate the product. hessinv_estimate : `Operator`, optional Initial estimate of the hessian ``H0^-1``. Returns ------- r : ``x.space`` element The result of ``Hn^-1(x)``. Notes ----- :math:`H_n^{-1}` is defined recursively as .. math:: H_{n+1}^{-1} = \left(I - \frac{ s_n y_n^T}{y_n^T s_n} \right) H_{n}^{-1} \left(I - \frac{ y_n s_n^T}{y_n^T s_n} \right) + \frac{s_n s_n^T}{y_n^T \, s_n} With :math:`H_0^{-1}` given by ``hess_estimate``.
[ "r", "Compute", "Hn^", "-", "1", "(", "x", ")", "for", "the", "L", "-", "BFGS", "method", "." ]
python
train
26.764706
jantman/awslimitchecker
docs/examples/multi-region_multi-account/alc_multi_account.py
https://github.com/jantman/awslimitchecker/blob/e50197f70f3d0abcc5cfc7fde6336f548b790e34/docs/examples/multi-region_multi-account/alc_multi_account.py#L124-L135
def _load_region(self, acct_id, region_name, path): """load config from a single per-region subdirectory of an account""" lim_path = os.path.join(path, 'limit_overrides.json') thresh_path = os.path.join(path, 'threshold_overrides.json') res = {'limit_overrides': {}, 'threshold_overrides': {}} if os.path.exists(lim_path): with open(lim_path, 'r') as fh: res['limit_overrides'] = json.loads(fh.read()) if os.path.exists(thresh_path): with open(thresh_path, 'r') as fh: res['threshold_overrides'] = json.loads(fh.read()) self._config[acct_id]['regions'][region_name] = res
[ "def", "_load_region", "(", "self", ",", "acct_id", ",", "region_name", ",", "path", ")", ":", "lim_path", "=", "os", ".", "path", ".", "join", "(", "path", ",", "'limit_overrides.json'", ")", "thresh_path", "=", "os", ".", "path", ".", "join", "(", "p...
load config from a single per-region subdirectory of an account
[ "load", "config", "from", "a", "single", "per", "-", "region", "subdirectory", "of", "an", "account" ]
python
train
56
coldfix/udiskie
udiskie/cli.py
https://github.com/coldfix/udiskie/blob/804c9d27df6f7361fec3097c432398f2d702f911/udiskie/cli.py#L204-L214
def run(self):
    """Run the main loop. Returns exit code."""
    self.exit_code = 1
    self.mainloop = GLib.MainLoop()
    try:
        # Start the async tasks; their completion callback records the
        # final exit code before the loop stops.
        startup = ensure_future(self._start_async_tasks())
        startup.callbacks.append(self.set_exit_code)
        self.mainloop.run()
    except KeyboardInterrupt:
        return 1
    return self.exit_code
[ "def", "run", "(", "self", ")", ":", "self", ".", "exit_code", "=", "1", "self", ".", "mainloop", "=", "GLib", ".", "MainLoop", "(", ")", "try", ":", "future", "=", "ensure_future", "(", "self", ".", "_start_async_tasks", "(", ")", ")", "future", "."...
Run the main loop. Returns exit code.
[ "Run", "the", "main", "loop", ".", "Returns", "exit", "code", "." ]
python
train
34.090909
Hackerfleet/hfos
hfos/schemata/defaultform.py
https://github.com/Hackerfleet/hfos/blob/b6df14eacaffb6be5c844108873ff8763ec7f0c9/hfos/schemata/defaultform.py#L92-L114
def lookup_field(key, lookup_type=None, placeholder=None, html_class="div",
                 select_type="strapselect", mapping="uuid"):
    """Generates a lookup field for form definitions"""
    # Default the lookup type to the field key, and derive a default
    # placeholder from whichever type is in effect.
    effective_type = key if lookup_type is None else lookup_type
    effective_placeholder = (
        "Select a " + effective_type if placeholder is None else placeholder
    )
    return {
        'key': key,
        'htmlClass': html_class,
        'type': select_type,
        'placeholder': effective_placeholder,
        'options': {
            "type": effective_type,
            "asyncCallback": "$ctrl.getFormData",
            "map": {'valueProperty': mapping, 'nameProperty': 'name'},
        },
    }
[ "def", "lookup_field", "(", "key", ",", "lookup_type", "=", "None", ",", "placeholder", "=", "None", ",", "html_class", "=", "\"div\"", ",", "select_type", "=", "\"strapselect\"", ",", "mapping", "=", "\"uuid\"", ")", ":", "if", "lookup_type", "is", "None", ...
Generates a lookup field for form definitions
[ "Generates", "a", "lookup", "field", "for", "form", "definitions" ]
python
train
28.043478
Qiskit/qiskit-terra
qiskit/tools/jupyter/backend_overview.py
https://github.com/Qiskit/qiskit-terra/blob/d4f58d903bc96341b816f7c35df936d6421267d1/qiskit/tools/jupyter/backend_overview.py#L112-L167
def backend_widget(backend):
    """Creates a backend widget.

    :param backend: backend object providing ``configuration()``,
        ``properties()`` and ``name()``.
    """
    config = backend.configuration().to_dict()
    props = backend.properties().to_dict()

    # Header: backend name.
    name = widgets.HTML(value="<h4>{name}</h4>".format(name=backend.name()),
                        layout=widgets.Layout())

    n_qubits = config['n_qubits']

    qubit_count = widgets.HTML(
        value="<h5><b>{qubits}</b></h5>".format(qubits=n_qubits),
        layout=widgets.Layout(justify_content='center'))

    # Fixed-size output area that hosts the gate-map plot.
    cmap = widgets.Output(layout=widgets.Layout(min_width='250px',
                                                max_width='250px',
                                                max_height='250px',
                                                min_height='250px',
                                                justify_content='center',
                                                align_items='center',
                                                margin='0px 0px 0px 0px'))

    with cmap:
        _cmap_fig = plot_gate_map(backend,
                                  plot_directed=False,
                                  label_qubits=False)
        if _cmap_fig is not None:
            display(_cmap_fig)
            # Prevents plot from showing up twice.
            plt.close(_cmap_fig)

    pending = generate_jobs_pending_widget()

    # Empty placeholders; presumably updated elsewhere with operational
    # and least-busy status -- confirm against the caller.
    is_oper = widgets.HTML(value="<h5></h5>",
                           layout=widgets.Layout(justify_content='center'))

    least_busy = widgets.HTML(value="<h5></h5>",
                              layout=widgets.Layout(justify_content='center'))

    # Average of the first per-qubit property over all qubits (T1 per
    # the variable names), rounded to one decimal.
    t1_units = props['qubits'][0][0]['unit']
    avg_t1 = round(sum([q[0]['value'] for q in props['qubits']])/n_qubits, 1)
    t1_widget = widgets.HTML(
        value="<h5>{t1} {units}</h5>".format(t1=avg_t1, units=t1_units),
        layout=widgets.Layout())

    # Same for the second per-qubit property (T2).
    t2_units = props['qubits'][0][1]['unit']
    avg_t2 = round(sum([q[1]['value'] for q in props['qubits']])/n_qubits, 1)
    t2_widget = widgets.HTML(
        value="<h5>{t2} {units}</h5>".format(t2=avg_t2, units=t2_units),
        layout=widgets.Layout())

    # Stack everything vertically into the final composite widget.
    out = widgets.VBox([name, cmap, qubit_count, pending,
                        least_busy, is_oper, t1_widget, t2_widget],
                       layout=widgets.Layout(display='inline-flex',
                                             flex_flow='column',
                                             align_items='center'))

    out._is_alive = True
    return out
[ "def", "backend_widget", "(", "backend", ")", ":", "config", "=", "backend", ".", "configuration", "(", ")", ".", "to_dict", "(", ")", "props", "=", "backend", ".", "properties", "(", ")", ".", "to_dict", "(", ")", "name", "=", "widgets", ".", "HTML", ...
Creates a backend widget.
[ "Creates", "a", "backend", "widget", "." ]
python
test
42.964286
dlecocq/nsq-py
nsq/util.py
https://github.com/dlecocq/nsq-py/blob/3ecacf6ab7719d38031179277113d875554a0c16/nsq/util.py#L31-L40
def hexify(message):
    '''Print out printable characters, but others in hex'''
    import string
    printable = string.printable

    def encode(ch):
        # Whitespace control characters and anything non-printable are
        # rendered as \xNN escapes; everything else passes through.
        if (ch in '\n\r \t') or (ch not in printable):
            return '\\x%02x' % ord(ch)
        return ch

    return ''.join(encode(ch) for ch in message)
[ "def", "hexify", "(", "message", ")", ":", "import", "string", "hexified", "=", "[", "]", "for", "char", "in", "message", ":", "if", "(", "char", "in", "'\\n\\r \\t'", ")", "or", "(", "char", "not", "in", "string", ".", "printable", ")", ":", "hexifi...
Print out printable characters, but others in hex
[ "Print", "out", "printable", "characters", "but", "others", "in", "hex" ]
python
train
32.6
totokaka/pySpaceGDN
pyspacegdn/spacegdn.py
https://github.com/totokaka/pySpaceGDN/blob/55c8be8d751e24873e0a7f7e99d2b715442ec878/pyspacegdn/spacegdn.py#L64-L72
def _create_user_agent(self):
    """ Create the user agent and return it as a string. """
    agent = '{}/{} {}'.format(pyspacegdn.__title__,
                              pyspacegdn.__version__,
                              default_user_agent())
    if not self.client_name:
        return agent
    # Prepend the client identification when one is configured.
    return '{}/{} {}'.format(self.client_name, self.client_version, agent)
[ "def", "_create_user_agent", "(", "self", ")", ":", "user_agent", "=", "'{}/{} {}'", ".", "format", "(", "pyspacegdn", ".", "__title__", ",", "pyspacegdn", ".", "__version__", ",", "default_user_agent", "(", ")", ")", "if", "self", ".", "client_name", ":", "...
Create the user agent and return it as a string.
[ "Create", "the", "user", "agent", "and", "return", "it", "as", "a", "string", "." ]
python
train
51.444444
b3j0f/conf
b3j0f/conf/configurable/log.py
https://github.com/b3j0f/conf/blob/18dd6d5d6560f9b202793739e2330a2181163511/b3j0f/conf/configurable/log.py#L51-L57
def _filehandler(configurable): """Default logging file handler.""" filename = configurable.log_name.replace('.', sep) path = join(configurable.log_path, '{0}.log'.format(filename)) return FileHandler(path, mode='a+')
[ "def", "_filehandler", "(", "configurable", ")", ":", "filename", "=", "configurable", ".", "log_name", ".", "replace", "(", "'.'", ",", "sep", ")", "path", "=", "join", "(", "configurable", ".", "log_path", ",", "'{0}.log'", ".", "format", "(", "filename"...
Default logging file handler.
[ "Default", "logging", "file", "handler", "." ]
python
train
32.714286
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_ha.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_ha.py#L12-L23
def reload_input_rbridge_id(self, **kwargs):
    """Auto Generated Code

    Builds the ``<reload><input><rbridge-id>`` XML payload and hands it
    to the callback.

    :param rbridge_id: text for the rbridge-id element (required kwarg)
    :param callback: callable receiving the built element
        (defaults to ``self._callback``)
    """
    # The generated original created a throwaway <config> element and
    # immediately rebound the name to <reload>; build <reload> directly.
    # Locals renamed to stop shadowing the builtins `reload` and `input`.
    config = ET.Element("reload")
    input_el = ET.SubElement(config, "input")
    rbridge_id = ET.SubElement(input_el, "rbridge-id")
    rbridge_id.text = kwargs.pop('rbridge_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
[ "def", "reload_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "reload", "=", "ET", ".", "Element", "(", "\"reload\"", ")", "config", "=", "reload", "input", "=", "ET", "."...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
35.083333
graphql-python/graphql-core
graphql/validation/rules/overlapping_fields_can_be_merged.py
https://github.com/graphql-python/graphql-core/blob/d8e9d3abe7c209eb2f51cf001402783bfd480596/graphql/validation/rules/overlapping_fields_can_be_merged.py#L609-L630
def _get_referenced_fields_and_fragment_names(
    context,  # ValidationContext
    cached_fields_and_fragment_names,  # type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]
    fragment,  # type: InlineFragment
):
    # type: (...) -> Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]
    """Given a reference to a fragment, return the represented collection of fields
    as well as a list of nested fragment names referenced via fragment spreads."""

    # Short-circuit building a type from the AST if possible: reuse the
    # cached result keyed by this fragment's selection set.
    cached = cached_fields_and_fragment_names.get(fragment.selection_set)
    if cached:
        return cached
    # Resolve the fragment's type condition against the schema, then
    # delegate to the shared field/fragment-name collector.
    fragment_type = type_from_ast(  # type: ignore
        context.get_schema(), fragment.type_condition
    )
    return _get_fields_and_fragments_names(  # type: ignore
        context, cached_fields_and_fragment_names, fragment_type,
        fragment.selection_set
    )
[ "def", "_get_referenced_fields_and_fragment_names", "(", "context", ",", "# ValidationContext", "cached_fields_and_fragment_names", ",", "# type: Dict[SelectionSet, Tuple[Dict[str, List[Tuple[Union[GraphQLInterfaceType, GraphQLObjectType, None], Field, GraphQLField]]], List[str]]]", "fragment", ...
Given a reference to a fragment, return the represented collection of fields as well as a list of nested fragment names referenced via fragment spreads.
[ "Given", "a", "reference", "to", "a", "fragment", "return", "the", "represented", "collection", "of", "fields", "as", "well", "as", "a", "list", "of", "nested", "fragment", "names", "referenced", "via", "fragment", "spreads", "." ]
python
train
46.818182
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L104-L110
def lmx_relative():
    """Language model using relative attention."""
    hparams = lmx_base()
    # Switch to relative attention and pin both dtypes to float32.
    overrides = {
        "self_attention_type": "dot_product_relative_v2",
        "activation_dtype": "float32",
        "weight_dtype": "float32",
    }
    for attr, value in overrides.items():
        setattr(hparams, attr, value)
    return hparams
[ "def", "lmx_relative", "(", ")", ":", "hparams", "=", "lmx_base", "(", ")", "hparams", ".", "self_attention_type", "=", "\"dot_product_relative_v2\"", "hparams", ".", "activation_dtype", "=", "\"float32\"", "hparams", ".", "weight_dtype", "=", "\"float32\"", "return...
Language model using relative attention.
[ "Language", "model", "using", "relative", "attention", "." ]
python
train
33.428571
brainiak/brainiak
brainiak/fcma/voxelselector.py
https://github.com/brainiak/brainiak/blob/408f12dec2ff56559a26873a848a09e4c8facfeb/brainiak/fcma/voxelselector.py#L423-L465
def _do_cross_validation(self, clf, data, task):
    """Run voxelwise cross validation based on correlation vectors.

    clf: classification function
        the classifier to be used in cross validation
    data: 3D numpy array
        If using sklearn.svm.SVC with precomputed kernel,
        it is in shape [num_processed_voxels, num_epochs, num_epochs];
        otherwise it is the input argument corr,
        in shape [num_processed_voxels, num_epochs, num_voxels]
    task: tuple (start_voxel_id, num_processed_voxels)
        depicting the voxels assigned to compute

    Returns
    -------
    results: list of tuple (voxel_id, accuracy)
        the accuracy numbers of all voxels, in accuracy descending order
        the length of array equals the number of assigned voxels
    """
    time1 = time.time()
    # Cross validation is independent per voxel, so for precomputed-kernel
    # SVC (where each voxel's kernel matrix is self-contained) the folds can
    # be fanned out over a process pool when multiprocessing is enabled.
    if isinstance(clf, sklearn.svm.SVC) and clf.kernel == 'precomputed'\
            and self.use_multiprocessing:
        # One work item per assigned voxel: (classifier, absolute voxel id,
        # fold count, this voxel's data slice, labels).
        inlist = [(clf, i + task[0], self.num_folds, data[i, :, :],
                   self.labels) for i in range(task[1])]
        with multiprocessing.Pool(self.process_num) as pool:
            results = list(pool.starmap(_cross_validation_for_one_voxel,
                                        inlist))
    else:
        # Serial fallback: cross-validate one voxel at a time.
        results = []
        for i in range(task[1]):
            result = _cross_validation_for_one_voxel(clf, i + task[0],
                                                     self.num_folds,
                                                     data[i, :, :],
                                                     self.labels)
            results.append(result)
    time2 = time.time()
    logger.debug(
        'cross validation for %d voxels, takes %.2f s' %
        (task[1], (time2 - time1))
    )
    return results
[ "def", "_do_cross_validation", "(", "self", ",", "clf", ",", "data", ",", "task", ")", ":", "time1", "=", "time", ".", "time", "(", ")", "if", "isinstance", "(", "clf", ",", "sklearn", ".", "svm", ".", "SVC", ")", "and", "clf", ".", "kernel", "==",...
Run voxelwise cross validation based on correlation vectors. clf: classification function the classifier to be used in cross validation data: 3D numpy array If using sklearn.svm.SVC with precomputed kernel, it is in shape [num_processed_voxels, num_epochs, num_epochs]; otherwise it is the input argument corr, in shape [num_processed_voxels, num_epochs, num_voxels] task: tuple (start_voxel_id, num_processed_voxels) depicting the voxels assigned to compute Returns ------- results: list of tuple (voxel_id, accuracy) the accuracy numbers of all voxels, in accuracy descending order the length of array equals the number of assigned voxels
[ "Run", "voxelwise", "cross", "validation", "based", "on", "correlation", "vectors", "." ]
python
train
43.837209
atlassian-api/atlassian-python-api
examples/confluence-draft-page-cleaner.py
https://github.com/atlassian-api/atlassian-python-api/blob/540d269905c3e7547b666fe30c647b2d512cf358/examples/confluence-draft-page-cleaner.py#L13-L33
def clean_draft_pages_from_space(confluence, space_key, count, date_now):
    """
    Remove draft pages from space using datetime.now

    :param confluence: Confluence API client used for all requests
    :param space_key: key of the space to scan for draft pages
    :param count: running counter of removed drafts (incremented and returned)
    :param date_now: reference datetime that draft ages are measured against
    :return: int counter
    """
    pages = confluence.get_all_draft_pages_from_space(space=space_key, start=0, limit=500)
    for page in pages:
        page_id = page['id']
        draft_page = confluence.get_draft_page_by_id(page_id=page_id)
        last_date_string = draft_page['version']['when']
        # Strip the ".000" millisecond part and the trailing 6-char UTC
        # offset before parsing with a naive format string.
        # NOTE(review): this discards the timezone — assumes the server's
        # timestamps are comparable with date_now; confirm both use the
        # same zone.
        last_date = datetime.datetime.strptime(last_date_string.replace(".000", "")[:-6], "%Y-%m-%dT%H:%M:%S")
        # Only remove drafts older than the DRAFT_DAYS retention window.
        if (date_now - last_date) > datetime.timedelta(days=DRAFT_DAYS):
            count += 1
            print("Removing page with page id: " + page_id)
            confluence.remove_page_as_draft(page_id=page_id)
            print("Removed page with date " + last_date_string)
    return count
[ "def", "clean_draft_pages_from_space", "(", "confluence", ",", "space_key", ",", "count", ",", "date_now", ")", ":", "pages", "=", "confluence", ".", "get_all_draft_pages_from_space", "(", "space", "=", "space_key", ",", "start", "=", "0", ",", "limit", "=", "...
Remove draft pages from space using datetime.now :param confluence: :param space_key: :param count: :param date_now: :return: int counter
[ "Remove", "draft", "pages", "from", "space", "using", "datetime", ".", "now", ":", "param", "confluence", ":", ":", "param", "space_key", ":", ":", "param", "count", ":", ":", "param", "date_now", ":", ":", "return", ":", "int", "counter" ]
python
train
43.333333
google/grr
grr/client/grr_response_client/client_actions/linux/linux.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/client/grr_response_client/client_actions/linux/linux.py#L282-L292
def CheckMounts(filename):
    """Parses the currently mounted devices.

    Yields (device, fs_type, mnt_point) tuples for each mounted filesystem
    whose type is listed in ACCEPTABLE_FILESYSTEMS and whose device path
    exists on disk.
    """
    # Each line of a mounts/mtab-style file is space-separated:
    #   <device> <mount point> <fs type> <options ...>
    with io.open(filename, "r") as fd:
        for line in fd:
            try:
                device, mnt_point, fs_type, _ = line.split(" ", 3)
            except ValueError:
                # Malformed line (fewer than four fields) — skip it.
                continue
            if fs_type in ACCEPTABLE_FILESYSTEMS:
                # Skip pseudo-devices that have no backing path.
                if os.path.exists(device):
                    yield device, fs_type, mnt_point
[ "def", "CheckMounts", "(", "filename", ")", ":", "with", "io", ".", "open", "(", "filename", ",", "\"r\"", ")", "as", "fd", ":", "for", "line", "in", "fd", ":", "try", ":", "device", ",", "mnt_point", ",", "fs_type", ",", "_", "=", "line", ".", "...
Parses the currently mounted devices.
[ "Parses", "the", "currently", "mounted", "devices", "." ]
python
train
32.090909
robotools/fontParts
Lib/fontParts/base/font.py
https://github.com/robotools/fontParts/blob/d2ff106fe95f9d566161d936a645157626568712/Lib/fontParts/base/font.py#L255-L300
def generateFormatToExtension(format, fallbackFormat):
    """
    Map a generate-format identifier to its file extension, or return
    *fallbackFormat* for formats without a registered extension.

    +--------------+--------------------------------------------------------------------+
    | mactype1     | Mac Type 1 font (generates suitcase  and LWFN file)                |
    +--------------+--------------------------------------------------------------------+
    | macttf       | Mac TrueType font (generates suitcase)                             |
    +--------------+--------------------------------------------------------------------+
    | macttdfont   | Mac TrueType font (generates suitcase with resources in data fork) |
    +--------------+--------------------------------------------------------------------+
    | otfcff       | PS OpenType (CFF-based) font (OTF)                                 |
    +--------------+--------------------------------------------------------------------+
    | otfttf       | PC TrueType/TT OpenType font (TTF)                                 |
    +--------------+--------------------------------------------------------------------+
    | pctype1      | PC Type 1 font (binary/PFB)                                        |
    +--------------+--------------------------------------------------------------------+
    | pcmm         | PC MultipleMaster font (PFB)                                       |
    +--------------+--------------------------------------------------------------------+
    | pctype1ascii | PC Type 1 font (ASCII/PFA)                                         |
    +--------------+--------------------------------------------------------------------+
    | pcmmascii    | PC MultipleMaster font (ASCII/PFA)                                 |
    +--------------+--------------------------------------------------------------------+
    | ufo1         | UFO format version 1                                               |
    +--------------+--------------------------------------------------------------------+
    | ufo2         | UFO format version 2                                               |
    +--------------+--------------------------------------------------------------------+
    | ufo3         | UFO format version 3                                               |
    +--------------+--------------------------------------------------------------------+
    | unixascii    | UNIX ASCII font (ASCII/PFA)                                        |
    +--------------+--------------------------------------------------------------------+
    """
    # Grouped by extension; formats absent below (mactype1, pctype1, pcmm,
    # pctype1ascii, pcmmascii) have no registered extension and fall through.
    if format in ("ufo1", "ufo2", "ufo3"):
        return ".ufo"
    if format in ("macttf", "otfttf"):
        return ".ttf"
    if format == "macttdfont":
        return ".dfont"
    if format == "otfcff":
        return ".otf"
    if format == "unixascii":
        return ".pfa"
    return fallbackFormat
[ "def", "generateFormatToExtension", "(", "format", ",", "fallbackFormat", ")", ":", "formatToExtension", "=", "dict", "(", "# mactype1=None,", "macttf", "=", "\".ttf\"", ",", "macttdfont", "=", "\".dfont\"", ",", "otfcff", "=", "\".otf\"", ",", "otfttf", "=", "\...
+--------------+--------------------------------------------------------------------+ | mactype1 | Mac Type 1 font (generates suitcase and LWFN file) | +--------------+--------------------------------------------------------------------+ | macttf | Mac TrueType font (generates suitcase) | +--------------+--------------------------------------------------------------------+ | macttdfont | Mac TrueType font (generates suitcase with resources in data fork) | +--------------+--------------------------------------------------------------------+ | otfcff | PS OpenType (CFF-based) font (OTF) | +--------------+--------------------------------------------------------------------+ | otfttf | PC TrueType/TT OpenType font (TTF) | +--------------+--------------------------------------------------------------------+ | pctype1 | PC Type 1 font (binary/PFB) | +--------------+--------------------------------------------------------------------+ | pcmm | PC MultipleMaster font (PFB) | +--------------+--------------------------------------------------------------------+ | pctype1ascii | PC Type 1 font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | pcmmascii | PC MultipleMaster font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+ | ufo1 | UFO format version 1 | +--------------+--------------------------------------------------------------------+ | ufo2 | UFO format version 2 | +--------------+--------------------------------------------------------------------+ | ufo3 | UFO format version 3 | +--------------+--------------------------------------------------------------------+ | unixascii | UNIX ASCII font (ASCII/PFA) | +--------------+--------------------------------------------------------------------+
[ "+", "--------------", "+", "--------------------------------------------------------------------", "+", "|", "mactype1", "|", "Mac", "Type", "1", "font", "(", "generates", "suitcase", "and", "LWFN", "file", ")", "|", "+", "--------------", "+", "-----------------------...
python
train
66.086957
radjkarl/fancyTools
fancytools/math/rotation.py
https://github.com/radjkarl/fancyTools/blob/4c4d961003dc4ed6e46429a0c24f7e2bb52caa8b/fancytools/math/rotation.py#L10-L29
def rotMatrix2AxisAndAngle(R):
    """
    stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv

    Convert a 3x3 rotation matrix into its axis/angle representation.

    R : 3x3 rotation matrix

    returns axis, angle
        axis  -- unit-length numpy array (x, y, z)
        angle -- rotation angle in radians, in [0, pi]

    NOTE(review): when angle is 0 or pi the off-diagonal differences vanish
    and the axis is undefined (division by zero) — same behaviour as the
    original implementation.
    """
    # trace(R) = 1 + 2*cos(angle)
    angle = np.arccos((R[0, 0] + R[1, 1] + R[2, 2] - 1) / 2)
    # The skew-symmetric part of R encodes the (unnormalized) rotation axis.
    ax = R[2, 1] - R[1, 2]
    ay = R[0, 2] - R[2, 0]
    az = R[1, 0] - R[0, 1]
    # Hoisted: the original recomputed this square root three times, once
    # per axis component.
    norm = np.sqrt(ax ** 2 + ay ** 2 + az ** 2)
    axis = np.array([ax, ay, az]) / norm
    return axis, angle
[ "def", "rotMatrix2AxisAndAngle", "(", "R", ")", ":", "angle", "=", "np", ".", "arccos", "(", "(", "R", "[", "0", ",", "0", "]", "+", "R", "[", "1", ",", "1", "]", "+", "R", "[", "2", ",", "2", "]", "-", "1", ")", "/", "2", ")", "axis", ...
stackoverflow.com/questions/12463487/obtain-rotation-axis-from-rotation-matrix-and-translation-vector-in-opencv R : 3x3 rotation matrix returns axis, angle
[ "stackoverflow", ".", "com", "/", "questions", "/", "12463487", "/", "obtain", "-", "rotation", "-", "axis", "-", "from", "-", "rotation", "-", "matrix", "-", "and", "-", "translation", "-", "vector", "-", "in", "-", "opencv" ]
python
train
41.2
quantopian/pgcontents
pgcontents/hybridmanager.py
https://github.com/quantopian/pgcontents/blob/ed36268b7917332d16868208e1e565742a8753e1/pgcontents/hybridmanager.py#L18-L41
def _resolve_path(path, manager_dict):
    """
    Resolve a path based on a dictionary of manager prefixes.

    Returns a triple of (prefix, manager, manager_relative_path).
    """
    normalized = normalize_api_path(path)
    # Split off the first path component; ``rest`` is everything after the
    # first '/', or '' when there is no '/'.
    first, _, rest = normalized.partition('/')

    # Prefer a sub-manager registered for the first path component.
    submgr = manager_dict.get(first)
    if submgr is not None:
        return first, submgr, rest

    # Otherwise fall back to a root manager registered under ''.
    root = manager_dict.get('')
    if root is not None:
        return '', root, normalized

    raise HTTPError(
        404,
        "Couldn't resolve path [{path}] and "
        "no root manager supplied!".format(path=normalized)
    )
[ "def", "_resolve_path", "(", "path", ",", "manager_dict", ")", ":", "path", "=", "normalize_api_path", "(", "path", ")", "parts", "=", "path", ".", "split", "(", "'/'", ")", "# Try to find a sub-manager for the first subdirectory.", "mgr", "=", "manager_dict", "."...
Resolve a path based on a dictionary of manager prefixes. Returns a triple of (prefix, manager, manager_relative_path).
[ "Resolve", "a", "path", "based", "on", "a", "dictionary", "of", "manager", "prefixes", "." ]
python
test
28.458333
tr00st/insult_generator
insultgenerator/phrases.py
https://github.com/tr00st/insult_generator/blob/a4496b29ea4beae6b82a4119e8dfbd871be75dbb/insultgenerator/phrases.py#L4-L14
def _unpack_bytes(bytes): """ Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. """ if bytes == b'': return 0 int_length = 4 len_diff = int_length - len(bytes) bytes = bytes + len_diff * b'\x00' return struct.unpack("<L", bytes)[0]
[ "def", "_unpack_bytes", "(", "bytes", ")", ":", "if", "bytes", "==", "b''", ":", "return", "0", "int_length", "=", "4", "len_diff", "=", "int_length", "-", "len", "(", "bytes", ")", "bytes", "=", "bytes", "+", "len_diff", "*", "b'\\x00'", "return", "st...
Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian.
[ "Unpack", "a", "set", "of", "bytes", "into", "an", "integer", ".", "First", "pads", "to", "4", "bytes", ".", "Little", "endian", "." ]
python
train
27.454545
DataBiosphere/toil
src/toil/provisioners/clusterScaler.py
https://github.com/DataBiosphere/toil/blob/a8252277ff814e7bee0971139c2344f88e44b644/src/toil/provisioners/clusterScaler.py#L417-L437
def addCompletedJob(self, job, wallTime):
    """
    Adds the shape of a completed job to the queue, allowing the scalar to use the last N
    completed jobs in factoring how many nodes are required in the cluster.
    :param toil.job.JobNode job: The memory, core and disk requirements of the completed job
    :param int wallTime: The wall-time taken to complete the job in seconds.
    """
    name = job.jobName
    # Fold this job's runtime into the per-job-name running average.
    if name in self.jobNameToAvgRuntime:
        completed = self.jobNameToNumCompleted[name]
        runningAvg = self.jobNameToAvgRuntime[name]
        self.jobNameToAvgRuntime[name] = \
            (runningAvg * completed + wallTime) / float(completed + 1)
        self.jobNameToNumCompleted[name] = completed + 1
    else:
        # First completion seen for this job name.
        self.jobNameToAvgRuntime[name] = wallTime
        self.jobNameToNumCompleted[name] = 1

    # Fold the runtime into the global running average as well.
    self.totalJobsCompleted += 1
    self.totalAvgRuntime = \
        (self.totalAvgRuntime * (self.totalJobsCompleted - 1) + wallTime) \
        / float(self.totalJobsCompleted)
[ "def", "addCompletedJob", "(", "self", ",", "job", ",", "wallTime", ")", ":", "#Adjust average runtimes to include this job.", "if", "job", ".", "jobName", "in", "self", ".", "jobNameToAvgRuntime", ":", "prevAvg", "=", "self", ".", "jobNameToAvgRuntime", "[", "job...
Adds the shape of a completed job to the queue, allowing the scalar to use the last N completed jobs in factoring how many nodes are required in the cluster. :param toil.job.JobNode job: The memory, core and disk requirements of the completed job :param int wallTime: The wall-time taken to complete the job in seconds.
[ "Adds", "the", "shape", "of", "a", "completed", "job", "to", "the", "queue", "allowing", "the", "scalar", "to", "use", "the", "last", "N", "completed", "jobs", "in", "factoring", "how", "many", "nodes", "are", "required", "in", "the", "cluster", ".", ":"...
python
train
53.190476
StackStorm/pybind
pybind/slxos/v17s_1_02/openflow_state/__init__.py
https://github.com/StackStorm/pybind/blob/44c467e71b2b425be63867aba6e6fa28b2cfe7fb/pybind/slxos/v17s_1_02/openflow_state/__init__.py#L503-L524
def _set_queues_interface(self, v, load=False):
    """
    Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container)
    If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly.
    """
    # Unwrap wrapped YANG values to their underlying type before validation.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value in a YANGDynClass so container type checking and
        # path registration run against the generated schema metadata.
        t = YANGDynClass(v,base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """queues_interface must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=queues_interface.queues_interface, is_container='container', presence=False, yang_name="queues-interface", rest_name="queues-interface", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'openflow-queues-interface-queues-interface-1'}}, namespace='urn:brocade.com:mgmt:brocade-openflow-operational', defining_module='brocade-openflow-operational', yang_type='container', is_config=False)""",
        })
    self.__queues_interface = t
    # Some generated classes define _set() to propagate changes; call it if present.
    if hasattr(self, '_set'):
        self._set()
[ "def", "_set_queues_interface", "(", "self", ",", "v", ",", "load", "=", "False", ")", ":", "if", "hasattr", "(", "v", ",", "\"_utype\"", ")", ":", "v", "=", "v", ".", "_utype", "(", "v", ")", "try", ":", "t", "=", "YANGDynClass", "(", "v", ",", ...
Setter method for queues_interface, mapped from YANG variable /openflow_state/queues_interface (container) If this variable is read-only (config: false) in the source YANG file, then _set_queues_interface is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_queues_interface() directly.
[ "Setter", "method", "for", "queues_interface", "mapped", "from", "YANG", "variable", "/", "openflow_state", "/", "queues_interface", "(", "container", ")", "If", "this", "variable", "is", "read", "-", "only", "(", "config", ":", "false", ")", "in", "the", "s...
python
train
81.590909
merll/docker-map
dockermap/map/runner/base.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/runner/base.py#L48-L62
def create_network(self, action, n_name, **kwargs):
    """
    Creates a configured network.

    :param action: Action configuration.
    :type action: dockermap.map.runner.ActionConfig
    :param n_name: Network name.
    :type n_name: unicode | str
    :param kwargs: Additional keyword arguments to complement or override the configuration-based values.
    :type kwargs: dict
    :return: Response of the client's network creation call (must contain an 'Id' key).
    """
    c_kwargs = self.get_network_create_kwargs(action, n_name, **kwargs)
    res = action.client.create_network(**c_kwargs)
    # Remember the new network's id under its name so later actions on this
    # client can resolve it without another API round trip.
    self._policy.network_names[action.client_name][n_name] = res['Id']
    return res
[ "def", "create_network", "(", "self", ",", "action", ",", "n_name", ",", "*", "*", "kwargs", ")", ":", "c_kwargs", "=", "self", ".", "get_network_create_kwargs", "(", "action", ",", "n_name", ",", "*", "*", "kwargs", ")", "res", "=", "action", ".", "cl...
Creates a configured network. :param action: Action configuration. :type action: dockermap.map.runner.ActionConfig :param n_name: Network name. :type n_name: unicode | str :param kwargs: Additional keyword arguments to complement or override the configuration-based values. :type kwargs: dict
[ "Creates", "a", "configured", "network", "." ]
python
train
42.4
pudo/jsongraph
jsongraph/query.py
https://github.com/pudo/jsongraph/blob/35e4f397dbe69cd5553cf9cb9ab98859c3620f03/jsongraph/query.py#L56-L66
def get_name(self, data):
    """ For non-specific queries, this will return the actual
    name in the result. """
    node = self.node
    # Specific attributes already know their own name.
    if node.specific_attribute:
        return node.name
    name = data.get(self.predicate_var)
    # Type predicates are exposed under the reserved '$schema' key.
    rdf_type = str(RDF.type)
    if rdf_type == node.name or rdf_type == name:
        return '$schema'
    # Strip the predicate namespace prefix, if present.
    return name[len(PRED):] if name.startswith(PRED) else name
[ "def", "get_name", "(", "self", ",", "data", ")", ":", "if", "self", ".", "node", ".", "specific_attribute", ":", "return", "self", ".", "node", ".", "name", "name", "=", "data", ".", "get", "(", "self", ".", "predicate_var", ")", "if", "str", "(", ...
For non-specific queries, this will return the actual name in the result.
[ "For", "non", "-", "specific", "queries", "this", "will", "return", "the", "actual", "name", "in", "the", "result", "." ]
python
train
36.636364
safarijv/sbo-selenium
sbo_selenium/management/commands/selenium.py
https://github.com/safarijv/sbo-selenium/blob/16539f1b17cda18270033db3b64ab25bc05c5664/sbo_selenium/management/commands/selenium.py#L73-L140
def handle(self, *args, **options):
    """
    Run the specified Selenium test(s) the indicated number of times in the
    specified browser.

    Positional args are test names; when none are given the tests come from
    settings.SELENIUM_DEFAULT_TESTS.  Depending on the options, the tests may
    run against a Docker container, Sauce Connect, a standalone Selenium
    server, or Appium.
    """
    browser_name = options['browser_name']
    count = options['count']
    if len(args) > 0:
        tests = list(args)
    else:
        tests = settings.SELENIUM_DEFAULT_TESTS

    # Kill any orphaned chromedriver processes
    process = Popen(['killall', 'chromedriver'], stderr=open(os.devnull, 'w'))
    process.wait()

    # Clear any old log and screenshots
    self.clean()

    # Track which external helper (if any) was started so the finally block
    # below can tear it down.
    docker = None
    sc_process = None
    selenium_process = None
    if options['docker']:
        if browser_name not in ['chrome', 'firefox']:
            self.stdout.write('Only chrome and firefox can currently be run in a Docker container')
            return
        docker = DockerSelenium(browser=browser_name,
                                port=settings.SELENIUM_DOCKER_PORT,
                                tag=settings.SELENIUM_DOCKER_TAG,
                                debug=settings.SELENIUM_DOCKER_DEBUG)
    elif 'platform' in options and settings.SELENIUM_SAUCE_CONNECT_PATH:
        running, sc_process = self.verify_sauce_connect_is_running(options)
        if not running:
            return
    elif browser_name in ['opera', 'safari']:
        # These browsers need the standalone Selenium server.
        running, selenium_process = self.verify_selenium_server_is_running()
        if not running:
            return
    elif browser_name in ['ipad', 'iphone']:
        # iOS devices are driven through Appium.
        if not self.verify_appium_is_running():
            return

    # Make it so django-nose won't have nosetests choke on our parameters
    TestRunner = get_runner(django_settings)
    if hasattr(TestRunner, 'django_opts'):
        for option in OPTIONS:
            TestRunner.django_opts.extend(option[0])

    # Configure and run the tests
    try:
        if docker:
            docker.start()
            options['command_executor'] = docker.command_executor()
        self.update_environment(options)
        self.run_tests(tests, browser_name, count)
    finally:
        # Stop the Selenium Docker container, if running
        if docker and docker.container_id:
            docker.stop()
        # Kill Sauce Connect, if running
        if sc_process:
            sc_process.kill()
        # Kill the Selenium standalone server, if running
        if selenium_process:
            selenium_process.kill()
[ "def", "handle", "(", "self", ",", "*", "args", ",", "*", "*", "options", ")", ":", "browser_name", "=", "options", "[", "'browser_name'", "]", "count", "=", "options", "[", "'count'", "]", "if", "len", "(", "args", ")", ">", "0", ":", "tests", "="...
Run the specified Selenium test(s) the indicated number of times in the specified browser.
[ "Run", "the", "specified", "Selenium", "test", "(", "s", ")", "the", "indicated", "number", "of", "times", "in", "the", "specified", "browser", "." ]
python
train
37.544118
data61/clkhash
clkhash/schema.py
https://github.com/data61/clkhash/blob/ec6398d6708a063de83f7c3d6286587bff8e7121/clkhash/schema.py#L181-L198
def from_json_file(schema_file,  # type: TextIO
                   validate=True  # type: bool
                   ):
    # type: (...) -> Schema
    """ Load a Schema object from a json file.

        :param schema_file: A JSON file containing the schema.
        :param validate: (default True) Raise an exception if the
            schema does not conform to the master schema.
        :raises SchemaError: When the schema is invalid.
        :return: the Schema
    """
    try:
        schema_dict = json.load(schema_file)
    except ValueError as e:
        # In Python 3 we can be more specific
        # with json.decoder.JSONDecodeError,
        # but that doesn't exist in Python 2.
        msg = 'The schema is not a valid JSON file.'
        # Chain the original parse error onto the SchemaError (py2-compatible).
        raise_from(SchemaError(msg), e)
    # Structural validation and object construction happen in from_json_dict.
    return from_json_dict(schema_dict, validate=validate)
[ "def", "from_json_file", "(", "schema_file", ",", "validate", "=", "True", ")", ":", "# type: (TextIO, bool) -> Schema", "try", ":", "schema_dict", "=", "json", ".", "load", "(", "schema_file", ")", "except", "ValueError", "as", "e", ":", "# In Python 3 we can be ...
Load a Schema object from a json file. :param schema_file: A JSON file containing the schema. :param validate: (default True) Raise an exception if the schema does not conform to the master schema. :raises SchemaError: When the schema is invalid. :return: the Schema
[ "Load", "a", "Schema", "object", "from", "a", "json", "file", ".", ":", "param", "schema_file", ":", "A", "JSON", "file", "containing", "the", "schema", ".", ":", "param", "validate", ":", "(", "default", "True", ")", "Raise", "an", "exception", "if", ...
python
train
42.111111
twisted/txaws
txaws/server/registry.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/registry.py#L46-L54
def scan(self, module, onerror=None, ignore=None):
    """Scan the given module object for L{Method}s and register them."""
    from venusian import Scanner
    kwargs = {"onerror": onerror, "categories": ["method"]}
    if ignore is not None:
        # Only pass it if specified, for backward compatibility
        kwargs["ignore"] = ignore
    Scanner(registry=self).scan(module, **kwargs)
[ "def", "scan", "(", "self", ",", "module", ",", "onerror", "=", "None", ",", "ignore", "=", "None", ")", ":", "from", "venusian", "import", "Scanner", "scanner", "=", "Scanner", "(", "registry", "=", "self", ")", "kwargs", "=", "{", "\"onerror\"", ":",...
Scan the given module object for L{Method}s and register them.
[ "Scan", "the", "given", "module", "object", "for", "L", "{", "Method", "}", "s", "and", "register", "them", "." ]
python
train
48.555556
zyga/python-glibc
pyglibc/_subreaper.py
https://github.com/zyga/python-glibc/blob/d6fdb306b123a995471584a5201155c60a34448a/pyglibc/_subreaper.py#L103-L143
def enabled(self):
    """
    read or write the child sub-reaper flag of the current process

    This property behaves in the following manner:

    * If a read is attempted and a prior read or write has determined that
      this feature is unavailable (status is equal to ``SR_UNSUPPORTED``)
      then no further attempts are made and the outcome is ``False``.
    * If a read is attempted and the current status is ``SR_UNKNOWN`` then
      a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome
      depends on the returned value. If prctl fails then status is set to
      ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl
      call succeeds then status is set to either ``SR_ENABLED`` or
      ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively.
    * If a write is attempted and a prior read or write has determined that
      this feature is unavailable (status is equal to ``SR_UNSUPPORTED``)
      *and* the write would have enabled the flag, a ValueError is raised
      with an appropriate message. Otherwise a write is attempted. If the
      attempt to enable the flag fails a ValueError is raised, just as in
      the previous case.
    * If a write intending to disable the flag fails then this failure is
      silently ignored but status is set to ``SR_UNSUPPORTED``.
    * If a write succeeds then the status is set accordingly to
      ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written
      ``True`` or ``False`` respectively.

    In other words, this property behaves as if it was really calling
    prctl() but it is not going to repeat operations that will always fail.
    Nor will it ignore failures silently where that matters.
    """
    # Feature already known to be missing; don't re-probe the kernel.
    if self._status == self.SR_UNSUPPORTED:
        return False
    # prctl writes the current flag value into this out-parameter.
    status = c_int()
    try:
        prctl(PR_GET_CHILD_SUBREAPER, addressof(status), 0, 0, 0)
    except OSError:
        # The kernel rejected the option; remember that it is unsupported.
        self._status = self.SR_UNSUPPORTED
    else:
        self._status = self.SR_ENABLED if status else self.SR_DISABLED
    return self._status == self.SR_ENABLED
[ "def", "enabled", "(", "self", ")", ":", "if", "self", ".", "_status", "==", "self", ".", "SR_UNSUPPORTED", ":", "return", "False", "status", "=", "c_int", "(", ")", "try", ":", "prctl", "(", "PR_GET_CHILD_SUBREAPER", ",", "addressof", "(", "status", ")"...
read or write the child sub-reaper flag of the current process This property behaves in the following manner: * If a read is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) then no further attempts are made and the outcome is ``False``. * If a read is attempted and the current status is ``SR_UNKNOWN`` then a ``prctl(PR_GET_CHILD_SUBREAPER, ...)`` call is made and the outcome depends on the returned value. If prctl fails then status is set to ``SR_UNSUPPORTED`` and the return value is ``False``. If the prctl call succeeds then status is set to either ``SR_ENABLED`` or ``SR_DISABLED`` and ``True`` or ``False`` is returned, respectively. * If a write is attempted and a prior read or write has determined that this feature is unavailable (status is equal to ``SR_UNSUPPORTED``) *and* the write would have enabled the flag, a ValueError is raised with an appropriate message. Otherwise a write is attempted. If the attempt to enable the flag fails a ValueError is raised, just as in the previous case. * If a write intending to disable the flag fails then this failure is silently ignored but status is set to ``SR_UNSUPPORTED``. * If a write succeeds then the status is set accordingly to ``SR_ENABLED`` or ``SR_DISABLED``, depending on the value written ``True`` or ``False`` respectively. In other words, this property behaves as if it was really calling prctl() but it is not going to repeat operations that will always fail. Nor will it ignore failures silently where that matters.
[ "read", "or", "write", "the", "child", "sub", "-", "reaper", "flag", "of", "the", "current", "process" ]
python
train
53.780488
duniter/duniter-python-api
duniterpy/api/bma/wot.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/bma/wot.py#L404-L412
async def identity_of(client: Client, search: str) -> dict:
    """
    GET Identity data written in the blockchain

    :param client: Client to connect to the api
    :param search: UID or public key
    :return:
    """
    endpoint = MODULE + '/identity-of/%s' % search
    return await client.get(endpoint, schema=IDENTITY_OF_SCHEMA)
[ "async", "def", "identity_of", "(", "client", ":", "Client", ",", "search", ":", "str", ")", "->", "dict", ":", "return", "await", "client", ".", "get", "(", "MODULE", "+", "'/identity-of/%s'", "%", "search", ",", "schema", "=", "IDENTITY_OF_SCHEMA", ")" ]
GET Identity data written in the blockchain :param client: Client to connect to the api :param search: UID or public key :return:
[ "GET", "Identity", "data", "written", "in", "the", "blockchain" ]
python
train
34
candango/firenado
firenado/launcher.py
https://github.com/candango/firenado/blob/4b1f628e485b521e161d64169c46a9818f26949f/firenado/launcher.py#L208-L223
def sig_handler(self, sig, _):
    """ Handle the signal sent to the process

    :param sig: Signal set to the process
    :param _: Frame is not being used
    """
    import tornado.ioloop
    from tornado.process import task_id
    tid = task_id()
    pid = os.getpid()
    # task_id() is None in the main process and an index in forked children,
    # so log which kind of process caught the signal.
    if tid is None:
        logger.warning("main process (pid %s) caught signal: %s" %
                       (pid, sig))
    else:
        logger.warning("child %s (pid %s) caught signal: %s" %
                       (tid, pid, sig))
    # Schedule shutdown on the IO loop so it runs on the loop's own thread
    # rather than inside the signal handler.
    tornado.ioloop.IOLoop.current().add_callback(self.shutdown)
[ "def", "sig_handler", "(", "self", ",", "sig", ",", "_", ")", ":", "import", "tornado", ".", "ioloop", "from", "tornado", ".", "process", "import", "task_id", "tid", "=", "task_id", "(", ")", "pid", "=", "os", ".", "getpid", "(", ")", "if", "tid", ...
Handle the signal sent to the process :param sig: Signal set to the process :param _: Frame is not being used
[ "Handle", "the", "signal", "sent", "to", "the", "process", ":", "param", "sig", ":", "Signal", "set", "to", "the", "process", ":", "param", "_", ":", "Frame", "is", "not", "being", "used" ]
python
train
38.5625
orb-framework/orb
orb/core/column_types/string.py
https://github.com/orb-framework/orb/blob/575be2689cb269e65a0a2678232ff940acc19e5a/orb/core/column_types/string.py#L392-L408
def generate(self): """ Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str> """ try: model = self.schema().model() except AttributeError: return os.urandom(self.__bits).encode('hex') else: while True: token = os.urandom(self.__bits).encode('hex') if model.select(where=orb.Query(self) == token).count() == 0: return token
[ "def", "generate", "(", "self", ")", ":", "try", ":", "model", "=", "self", ".", "schema", "(", ")", ".", "model", "(", ")", "except", "AttributeError", ":", "return", "os", ".", "urandom", "(", "self", ".", "__bits", ")", ".", "encode", "(", "'hex...
Generates a new token for this column based on its bit length. This method will not ensure uniqueness in the model itself, that should be checked against the model records in the database first. :return: <str>
[ "Generates", "a", "new", "token", "for", "this", "column", "based", "on", "its", "bit", "length", ".", "This", "method", "will", "not", "ensure", "uniqueness", "in", "the", "model", "itself", "that", "should", "be", "checked", "against", "the", "model", "r...
python
train
36.941176
ray-project/ray
python/ray/tune/suggest/bayesopt.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/bayesopt.py#L79-L90
def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to BayesOpt unless early terminated or errored""" if result: self.optimizer.register( params=self._live_trial_mapping[trial_id], target=result[self._reward_attr]) del self._live_trial_mapping[trial_id]
[ "def", "on_trial_complete", "(", "self", ",", "trial_id", ",", "result", "=", "None", ",", "error", "=", "False", ",", "early_terminated", "=", "False", ")", ":", "if", "result", ":", "self", ".", "optimizer", ".", "register", "(", "params", "=", "self",...
Passes the result to BayesOpt unless early terminated or errored
[ "Passes", "the", "result", "to", "BayesOpt", "unless", "early", "terminated", "or", "errored" ]
python
train
39.416667
kytos/kytos-utils
kytos/utils/napps.py
https://github.com/kytos/kytos-utils/blob/b4750c618d15cff75970ea6124bda4d2b9a33578/kytos/utils/napps.py#L292-L307
def install_remote(self): """Download, extract and install NApp.""" package, pkg_folder = None, None try: package = self._download() pkg_folder = self._extract(package) napp_folder = self._get_local_folder(pkg_folder) dst = self._installed / self.user / self.napp self._check_module(dst.parent) shutil.move(str(napp_folder), str(dst)) finally: # Delete temporary files if package: Path(package).unlink() if pkg_folder and pkg_folder.exists(): shutil.rmtree(str(pkg_folder))
[ "def", "install_remote", "(", "self", ")", ":", "package", ",", "pkg_folder", "=", "None", ",", "None", "try", ":", "package", "=", "self", ".", "_download", "(", ")", "pkg_folder", "=", "self", ".", "_extract", "(", "package", ")", "napp_folder", "=", ...
Download, extract and install NApp.
[ "Download", "extract", "and", "install", "NApp", "." ]
python
train
39.375
mitsei/dlkit
dlkit/records/assessment/basic/multi_choice_records.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/records/assessment/basic/multi_choice_records.py#L141-L149
def clear_choice(self, choice): """stub""" if len(self.my_osid_object_form._my_map['choices']) == 0: raise IllegalState('there are currently no choices defined for this question') if (len(self.my_osid_object_form._my_map['choices']) == 1 and choice in self.my_osid_object_form._my_map['choices']): raise IllegalState() self.my_osid_object_form._my_map['choices'] = \ [c for c in self.my_osid_object_form._my_map['choices'] if c != choice]
[ "def", "clear_choice", "(", "self", ",", "choice", ")", ":", "if", "len", "(", "self", ".", "my_osid_object_form", ".", "_my_map", "[", "'choices'", "]", ")", "==", "0", ":", "raise", "IllegalState", "(", "'there are currently no choices defined for this question'...
stub
[ "stub" ]
python
train
57.111111
developmentseed/landsat-util
landsat/utils.py
https://github.com/developmentseed/landsat-util/blob/92dc81771ddaa64a8a9124a89a6516b52485374b/landsat/utils.py#L167-L188
def three_digit(number): """ Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001' """ number = str(number) if len(number) == 1: return u'00%s' % number elif len(number) == 2: return u'0%s' % number else: return number
[ "def", "three_digit", "(", "number", ")", ":", "number", "=", "str", "(", "number", ")", "if", "len", "(", "number", ")", "==", "1", ":", "return", "u'00%s'", "%", "number", "elif", "len", "(", "number", ")", "==", "2", ":", "return", "u'0%s'", "%"...
Add 0s to inputs that their length is less than 3. :param number: The number to convert :type number: int :returns: String :example: >>> three_digit(1) '001'
[ "Add", "0s", "to", "inputs", "that", "their", "length", "is", "less", "than", "3", "." ]
python
train
18.545455
gccxml/pygccxml
pygccxml/declarations/pointer_traits.py
https://github.com/gccxml/pygccxml/blob/2b1efbb9e37ceb2ae925c7f3ce1570f476db9e1e/pygccxml/declarations/pointer_traits.py#L116-L129
def _search_in_bases(type_): """Implementation detail.""" found = False for base_type in type_.declaration.bases: try: found = internal_type_traits.get_by_name( base_type.related_class, "element_type") except runtime_errors.declaration_not_found_t: pass if found: return found raise RuntimeError( ("Unable to find 'element_type' declaration '%s'" "in type '%s'.") % type_.decl_string)
[ "def", "_search_in_bases", "(", "type_", ")", ":", "found", "=", "False", "for", "base_type", "in", "type_", ".", "declaration", ".", "bases", ":", "try", ":", "found", "=", "internal_type_traits", ".", "get_by_name", "(", "base_type", ".", "related_class", ...
Implementation detail.
[ "Implementation", "detail", "." ]
python
train
34.285714
SmokinCaterpillar/pypet
pypet/pypetlogging.py
https://github.com/SmokinCaterpillar/pypet/blob/97ad3e80d46dbdea02deeb98ea41f05a19565826/pypet/pypetlogging.py#L417-L445
def _check_and_replace_parser_args(parser, section, option, rename_func, make_dirs=True): """ Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created. """ args = parser.get(section, option, raw=True) strings = get_strings(args) replace = False for string in strings: isfilename = any(x in string for x in FILENAME_INDICATORS) if isfilename: newstring = rename_func(string) if make_dirs: try_make_dirs(newstring) # To work with windows path specifications we need this replacement: raw_string = string.replace('\\', '\\\\') raw_newstring = newstring.replace('\\', '\\\\') args = args.replace(raw_string, raw_newstring) replace = True if replace: parser.set(section, option, args)
[ "def", "_check_and_replace_parser_args", "(", "parser", ",", "section", ",", "option", ",", "rename_func", ",", "make_dirs", "=", "True", ")", ":", "args", "=", "parser", ".", "get", "(", "section", ",", "option", ",", "raw", "=", "True", ")", "strings", ...
Searches for parser settings that define filenames. If such settings are found, they are renamed according to the wildcard rules. Moreover, it is also tried to create the corresponding folders. :param parser: A config parser :param section: A config section :param option: The section option :param rename_func: A function to rename found files :param make_dirs: If the directories of the file should be created.
[ "Searches", "for", "parser", "settings", "that", "define", "filenames", "." ]
python
test
44.482759
pyviz/holoviews
holoviews/__init__.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/__init__.py#L66-L88
def help(obj, visualization=True, ansi=True, backend=None, recursive=False, pattern=None): """ Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out. """ backend = backend if backend else Store.current_backend info = Store.info(obj, ansi=ansi, backend=backend, visualization=visualization, recursive=recursive, pattern=pattern, elements=elements_list) msg = ("\nTo view the visualization options applicable to this " "object or class, use:\n\n" " holoviews.help(obj, visualization=True)\n\n") if info: print((msg if visualization is False else '') + info) else: pydoc.help(obj)
[ "def", "help", "(", "obj", ",", "visualization", "=", "True", ",", "ansi", "=", "True", ",", "backend", "=", "None", ",", "recursive", "=", "False", ",", "pattern", "=", "None", ")", ":", "backend", "=", "backend", "if", "backend", "else", "Store", "...
Extended version of the built-in help that supports parameterized functions and objects. A pattern (regular expression) may be used to filter the output and if recursive is set to True, documentation for the supplied object is shown. Note that the recursive option will only work with an object instance and not a class. If ansi is set to False, all ANSI color codes are stripped out.
[ "Extended", "version", "of", "the", "built", "-", "in", "help", "that", "supports", "parameterized", "functions", "and", "objects", ".", "A", "pattern", "(", "regular", "expression", ")", "may", "be", "used", "to", "filter", "the", "output", "and", "if", "...
python
train
44.086957
DataONEorg/d1_python
lib_common/src/d1_common/utils/filesystem.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/lib_common/src/d1_common/utils/filesystem.py#L115-L130
def abs_path(rel_path): """Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``. """ # noinspection PyProtectedMember return os.path.abspath( os.path.join(os.path.dirname(sys._getframe(1).f_code.co_filename), rel_path) )
[ "def", "abs_path", "(", "rel_path", ")", ":", "# noinspection PyProtectedMember", "return", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "sys", ".", "_getframe", "(", "1", ")", "."...
Convert a path that is relative to the module from which this function is called, to an absolute path. Args: rel_path: str Path relative to the location of the module file from which this function is called. Returns: str : Absolute path to the location specified by ``rel_path``.
[ "Convert", "a", "path", "that", "is", "relative", "to", "the", "module", "from", "which", "this", "function", "is", "called", "to", "an", "absolute", "path", "." ]
python
train
31
argaen/python-google-distance-matrix
google_distance_matrix/core.py
https://github.com/argaen/python-google-distance-matrix/blob/20c07bf7d560180ef380b3148616f67f55246a5c/google_distance_matrix/core.py#L112-L130
def get_closest_points(self, max_distance=None, origin_index=0, origin_raw=None): """ Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance. """ if not self.dict_response['distance']['value']: self.get_distance_values() if origin_raw: origin = copy.deepcopy(self.dict_response['distance']['value'][origin_raw]) else: origin = copy.deepcopy(self.dict_response['distance']['value'][self.origins[origin_index]]) tmp_origin = copy.deepcopy(origin) if max_distance is not None: for k, v in tmp_origin.iteritems(): if v > max_distance or v == 'ZERO_RESULTS': del(origin[k]) return origin
[ "def", "get_closest_points", "(", "self", ",", "max_distance", "=", "None", ",", "origin_index", "=", "0", ",", "origin_raw", "=", "None", ")", ":", "if", "not", "self", ".", "dict_response", "[", "'distance'", "]", "[", "'value'", "]", ":", "self", ".",...
Get closest points to a given origin. Returns a list of 2 element tuples where first element is the destination and the second is the distance.
[ "Get", "closest", "points", "to", "a", "given", "origin", ".", "Returns", "a", "list", "of", "2", "element", "tuples", "where", "first", "element", "is", "the", "destination", "and", "the", "second", "is", "the", "distance", "." ]
python
train
42.684211
idlesign/uwsgiconf
uwsgiconf/options/routing_routers.py
https://github.com/idlesign/uwsgiconf/blob/475407acb44199edbf7e0a66261bfeb51de1afae/uwsgiconf/options/routing_routers.py#L726-L747
def set_connections_params( self, harakiri=None, timeout_socket=None, retry_delay=None, retry_max=None, use_xclient=None): """Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60. :param int retry_delay: Retry connections to dead static nodes after the specified amount of seconds. Default: 30. :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3. :param bool use_xclient: Use the xclient protocol to pass the client address. """ super(RouterRaw, self).set_connections_params(**filter_locals(locals(), ['retry_max', 'use_xclient'])) self._set_aliased('max-retries', retry_max) self._set_aliased('xclient', use_xclient) return self
[ "def", "set_connections_params", "(", "self", ",", "harakiri", "=", "None", ",", "timeout_socket", "=", "None", ",", "retry_delay", "=", "None", ",", "retry_max", "=", "None", ",", "use_xclient", "=", "None", ")", ":", "super", "(", "RouterRaw", ",", "self...
Sets connection-related parameters. :param int harakiri: Set gateway harakiri timeout (seconds). :param int timeout_socket: Node socket timeout (seconds). Default: 60. :param int retry_delay: Retry connections to dead static nodes after the specified amount of seconds. Default: 30. :param int retry_max: Maximum number of retries/fallbacks to other nodes. Default: 3. :param bool use_xclient: Use the xclient protocol to pass the client address.
[ "Sets", "connection", "-", "related", "parameters", "." ]
python
train
39.863636
opencobra/cobrapy
cobra/core/gene.py
https://github.com/opencobra/cobrapy/blob/9d1987cdb3a395cf4125a3439c3b002ff2be2009/cobra/core/gene.py#L206-L216
def knock_out(self): """Knockout gene by marking it as non-functional and setting all associated reactions bounds to zero. The change is reverted upon exit if executed within the model as context. """ self.functional = False for reaction in self.reactions: if not reaction.functional: reaction.bounds = (0, 0)
[ "def", "knock_out", "(", "self", ")", ":", "self", ".", "functional", "=", "False", "for", "reaction", "in", "self", ".", "reactions", ":", "if", "not", "reaction", ".", "functional", ":", "reaction", ".", "bounds", "=", "(", "0", ",", "0", ")" ]
Knockout gene by marking it as non-functional and setting all associated reactions bounds to zero. The change is reverted upon exit if executed within the model as context.
[ "Knockout", "gene", "by", "marking", "it", "as", "non", "-", "functional", "and", "setting", "all", "associated", "reactions", "bounds", "to", "zero", "." ]
python
valid
34.909091
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Node/__init__.py#L1232-L1242
def add_dependency(self, depend): """Adds dependencies.""" try: self._add_child(self.depends, self.depends_set, depend) except TypeError as e: e = e.args[0] if SCons.Util.is_List(e): s = list(map(str, e)) else: s = str(e) raise SCons.Errors.UserError("attempted to add a non-Node dependency to %s:\n\t%s is a %s, not a Node" % (str(self), s, type(e)))
[ "def", "add_dependency", "(", "self", ",", "depend", ")", ":", "try", ":", "self", ".", "_add_child", "(", "self", ".", "depends", ",", "self", ".", "depends_set", ",", "depend", ")", "except", "TypeError", "as", "e", ":", "e", "=", "e", ".", "args",...
Adds dependencies.
[ "Adds", "dependencies", "." ]
python
train
41.545455
dunovank/jupyter-themes
jupyterthemes/jtplot.py
https://github.com/dunovank/jupyter-themes/blob/421016c2e4fed75fa1830d664c10478d9bd25ed1/jupyterthemes/jtplot.py#L215-L223
def figsize(x=8, y=7., aspect=1.): """ manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar """ # update rcparams with adjusted figsize params mpl.rcParams.update({'figure.figsize': (x*aspect, y)})
[ "def", "figsize", "(", "x", "=", "8", ",", "y", "=", "7.", ",", "aspect", "=", "1.", ")", ":", "# update rcparams with adjusted figsize params", "mpl", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "(", "x", "*", "aspect", ",", "y", ...
manually set the default figure size of plots ::Arguments:: x (float): x-axis size y (float): y-axis size aspect (float): aspect ratio scalar
[ "manually", "set", "the", "default", "figure", "size", "of", "plots", "::", "Arguments", "::", "x", "(", "float", ")", ":", "x", "-", "axis", "size", "y", "(", "float", ")", ":", "y", "-", "axis", "size", "aspect", "(", "float", ")", ":", "aspect",...
python
train
35.777778
housecanary/hc-api-python
housecanary/object.py
https://github.com/housecanary/hc-api-python/blob/2bb9e2208b34e8617575de45934357ee33b8531c/housecanary/object.py#L243-L259
def create_from_json(cls, json_data): """Deserialize msa json data into a Msa object Args: json_data (dict): The json data for this msa Returns: Msa object """ msa = Msa() msa.msa = json_data["msa_info"]["msa"] msa.meta = json_data["meta"] if "meta" in json_data else None msa.component_results = _create_component_results(json_data, "msa_info") return msa
[ "def", "create_from_json", "(", "cls", ",", "json_data", ")", ":", "msa", "=", "Msa", "(", ")", "msa", ".", "msa", "=", "json_data", "[", "\"msa_info\"", "]", "[", "\"msa\"", "]", "msa", ".", "meta", "=", "json_data", "[", "\"meta\"", "]", "if", "\"m...
Deserialize msa json data into a Msa object Args: json_data (dict): The json data for this msa Returns: Msa object
[ "Deserialize", "msa", "json", "data", "into", "a", "Msa", "object" ]
python
train
25.941176
tortoise/tortoise-orm
tortoise/queryset.py
https://github.com/tortoise/tortoise-orm/blob/7d16457731905e19d4d06ccd5b4ea16d4a9447b2/tortoise/queryset.py#L351-L358
def first(self) -> "QuerySet": """ Limit queryset to one object and return one object instead of list. """ queryset = self._clone() queryset._limit = 1 queryset._single = True return queryset
[ "def", "first", "(", "self", ")", "->", "\"QuerySet\"", ":", "queryset", "=", "self", ".", "_clone", "(", ")", "queryset", ".", "_limit", "=", "1", "queryset", ".", "_single", "=", "True", "return", "queryset" ]
Limit queryset to one object and return one object instead of list.
[ "Limit", "queryset", "to", "one", "object", "and", "return", "one", "object", "instead", "of", "list", "." ]
python
train
30
wummel/linkchecker
linkcheck/updater.py
https://github.com/wummel/linkchecker/blob/c2ce810c3fb00b895a841a7be6b2e78c64e7b042/linkcheck/updater.py#L57-L71
def get_online_version (): """Download update info and parse it.""" # prevent getting a cached answer headers = {'Pragma': 'no-cache', 'Cache-Control': 'no-cache'} content, info = get_content(UPDATE_URL, addheaders=headers) if content is None: return content, info version, url = None, None for line in content.splitlines(): if line.startswith(VERSION_TAG): version = line.split(':', 1)[1].strip() elif line.startswith(URL_TAG): url = line.split(':', 1)[1].strip() url = url.replace('${version}', version) return version, url
[ "def", "get_online_version", "(", ")", ":", "# prevent getting a cached answer", "headers", "=", "{", "'Pragma'", ":", "'no-cache'", ",", "'Cache-Control'", ":", "'no-cache'", "}", "content", ",", "info", "=", "get_content", "(", "UPDATE_URL", ",", "addheaders", "...
Download update info and parse it.
[ "Download", "update", "info", "and", "parse", "it", "." ]
python
train
40.2
MartinThoma/hwrt
hwrt/handwritten_data.py
https://github.com/MartinThoma/hwrt/blob/725c21a3d0f5a30b8492cbc184b3688ceb364e1c/hwrt/handwritten_data.py#L299-L311
def get_center_of_mass(self): """ Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square. """ xsum, ysum, counter = 0., 0., 0 for stroke in self.get_pointlist(): for point in stroke: xsum += point['x'] ysum += point['y'] counter += 1 return (xsum / counter, ysum / counter)
[ "def", "get_center_of_mass", "(", "self", ")", ":", "xsum", ",", "ysum", ",", "counter", "=", "0.", ",", "0.", ",", "0", "for", "stroke", "in", "self", ".", "get_pointlist", "(", ")", ":", "for", "point", "in", "stroke", ":", "xsum", "+=", "point", ...
Get a tuple (x,y) that is the center of mass. The center of mass is not necessarily the same as the center of the bounding box. Imagine a black square and a single dot wide outside of the square.
[ "Get", "a", "tuple", "(", "x", "y", ")", "that", "is", "the", "center", "of", "mass", ".", "The", "center", "of", "mass", "is", "not", "necessarily", "the", "same", "as", "the", "center", "of", "the", "bounding", "box", ".", "Imagine", "a", "black", ...
python
train
40.384615
Miserlou/django-knockout-modeler
knockout_modeler/ko.py
https://github.com/Miserlou/django-knockout-modeler/blob/714d21cc5ed008f132cea01dbae9f214c2bf1b76/knockout_modeler/ko.py#L18-L37
def get_fields(model): """ Returns a Model's knockout_fields, or the default set of field names. """ try: if hasattr(model, "knockout_fields"): fields = model.knockout_fields() else: try: fields = model_to_dict(model).keys() except Exception as e: fields = model._meta.get_fields() return fields # Crash proofing except Exception as e: logger.exception(e) return []
[ "def", "get_fields", "(", "model", ")", ":", "try", ":", "if", "hasattr", "(", "model", ",", "\"knockout_fields\"", ")", ":", "fields", "=", "model", ".", "knockout_fields", "(", ")", "else", ":", "try", ":", "fields", "=", "model_to_dict", "(", "model",...
Returns a Model's knockout_fields, or the default set of field names.
[ "Returns", "a", "Model", "s", "knockout_fields", "or", "the", "default", "set", "of", "field", "names", "." ]
python
train
24.2
swharden/SWHLab
swhlab/core.py
https://github.com/swharden/SWHLab/blob/a86c3c65323cec809a4bd4f81919644927094bf5/swhlab/core.py#L109-L169
def setsweep(self, sweep=0, channel=0): """set the sweep and channel of an ABF. Both start at 0.""" try: sweep=int(sweep) except: self.log.error("trying to set sweep to [%s]",sweep) return if sweep<0: sweep=self.sweeps-1-sweep # if negative, start from the end sweep=max(0,min(sweep,self.sweeps-1)) # correct for out of range sweeps if 'sweep' in dir(self) and self.sweep == sweep and self.derivative is False: self.log.debug("sweep %d already set",sweep) return #self.log.debug("loading sweep %d (Ch%d)",sweep,channel) self.channels=self.ABFblock.segments[sweep].size["analogsignals"] if self.channels>1 and sweep==0: self.log.info("WARNING: multichannel not yet supported!") #TODO: self.trace = self.ABFblock.segments[sweep].analogsignals[channel] self.sweep=sweep # currently selected sweep self.channel=channel # currently selected channel # sweep information self.rate = int(self.trace.sampling_rate) # Hz self.period = float(1/self.rate) # seconds (inverse of sample rate) self.pointsPerSec = int(self.rate) # for easy access self.pointsPerMs = int(self.rate/1000.0) # for easy access self.sweepSize = len(self.trace) # number of data points per sweep self.sweepInterval = self.trace.duration.magnitude # sweep interval (seconds) self.sweepLength = float(self.trace.t_stop-self.trace.t_start) # in seconds self.length = self.sweepLength*self.sweeps # length (sec) of total recording self.lengthMinutes = self.length/60.0 # length (minutes) of total recording if str(self.trace.dimensionality) == 'pA': self.units,self.units2="pA","clamp current (pA)" self.unitsD,self.unitsD2="pA/ms","current velocity (pA/ms)" self.protoUnits,self.protoUnits2="mV","command voltage (mV)" elif str(self.trace.dimensionality) == 'mV': self.units,self.units2="mV","membrane potential (mV)" self.unitsD,self.unitsD2="V/s","potential velocity (V/s)" self.protoUnits,self.protoUnits2="pA","command current (pA)" else: self.units,self.units2="?","unknown units" self.unitsD,self.unitsD2="?","unknown units" # sweep 
data self.sweepY = self.trace.magnitude # sweep data (mV or pA) self.sweepT = self.trace.times.magnitude # actual sweep times (sec) self.sweepStart = float(self.trace.t_start) # time start of sweep (sec) self.sweepX2 = self.sweepT-self.trace.t_start.magnitude # sweeps overlap self.sweepX = self.sweepX2+sweep*self.sweepInterval # assume no gaps if self.derivative: self.log.debug("taking derivative") #self.sweepD=np.diff(self.sweepY) # take derivative self.sweepD=self.sweepY[1:]-self.sweepY[:-1] # better? self.sweepD=np.insert(self.sweepD,0,self.sweepD[0]) # add a point self.sweepD/=(self.period*1000) # correct for sample rate else: self.sweepD=[0] # derivative is forced to be empty # generate the protocol too self.generate_protocol()
[ "def", "setsweep", "(", "self", ",", "sweep", "=", "0", ",", "channel", "=", "0", ")", ":", "try", ":", "sweep", "=", "int", "(", "sweep", ")", "except", ":", "self", ".", "log", ".", "error", "(", "\"trying to set sweep to [%s]\"", ",", "sweep", ")"...
set the sweep and channel of an ABF. Both start at 0.
[ "set", "the", "sweep", "and", "channel", "of", "an", "ABF", ".", "Both", "start", "at", "0", "." ]
python
valid
52.721311
JoseAntFer/pyny3d
pyny3d/geoms.py
https://github.com/JoseAntFer/pyny3d/blob/fb81684935a24f7e50c975cb4383c81a63ab56df/pyny3d/geoms.py#L876-L947
def melt(self, plot=False): """ Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanars. * Are contiguous. * The result is convex. This method is very useful at reducing the number the items and, therefore, the shadowing time computing. Before override this instance, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method do not check if the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visualy check the solution. """ from pyny3d.utils import bool2index from scipy.spatial import ConvexHull # First, coplanarity ## Normalize parametric equations para = [poly.get_parametric() for poly in self] para = np.array([p/np.linalg.norm(p) for p in para]) n = para.shape[0] ## Coincidences cop = [] for i, plane in enumerate(para[:-1]): indexes = np.zeros((n-i-1, 4)) for c in range(4): indexes[:, c] = np.isclose(para[i+1:, c], plane[c]) pos = bool2index(indexes.sum(axis=1)==4)+i+1 if pos.shape[0] > 0: cop.append(np.hstack((i, pos))) para[pos, :] = np.nan # Second, contiguity substituted = [] cop_cont = [] for i, group in enumerate(cop): polygons = [self[i] for i in group] if Surface.contiguous(polygons): cop_cont.append(polygons) substituted.append(group) if len(substituted) != 0: self.save() if plot: self.plot() substituted = sum(substituted) # Hull merged = [] for polygons in cop_cont: points = np.concatenate([polygon.points for polygon in polygons]) hull = ConvexHull(points[:, :2]) merged.append(Polygon(points[hull.vertices])) # Final substitution new_surface = [self[i] for i in range(len(self.polygons)) if i not in substituted] new_surface += merged self.polygons = new_surface self.sorted_areas = None if plot: self.plot()
[ "def", "melt", "(", "self", ",", "plot", "=", "False", ")", ":", "from", "pyny3d", ".", "utils", "import", "bool2index", "from", "scipy", ".", "spatial", "import", "ConvexHull", "# First, coplanarity\r", "## Normalize parametric equations\r", "para", "=", "[", "...
Find and merge groups of polygons in the surface that meet the following criteria: * Are coplanars. * Are contiguous. * The result is convex. This method is very useful at reducing the number the items and, therefore, the shadowing time computing. Before override this instance, it is saved and can be restored with ``.restore()`` :param plot: If True, generates the before and after visualizations for the surface. Use it to check the results. :type plot: bool :returns: None .. warning:: This method do not check if the merged polygons are actually convex. The convex hull of the union is directly calculated. For this reason, it is very important to visualy check the solution.
[ "Find", "and", "merge", "groups", "of", "polygons", "in", "the", "surface", "that", "meet", "the", "following", "criteria", ":", "*", "Are", "coplanars", ".", "*", "Are", "contiguous", ".", "*", "The", "result", "is", "convex", ".", "This", "method", "is...
python
train
38.611111
StanfordBioinformatics/loom
server/loomengine_server/api/async.py
https://github.com/StanfordBioinformatics/loom/blob/db2031a1a87124fee1aeb7414a668c03d774a698/server/loomengine_server/api/async.py#L66-L81
def check_for_missed_cleanup(): """Check for TaskAttempts that were never cleaned up """ if get_setting('PRESERVE_ALL'): return from api.models.tasks import TaskAttempt if get_setting('PRESERVE_ON_FAILURE'): for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter( status_is_cleaned_up=False).exclude( status_is_failed=True): task_attempt.cleanup() else: for task_attempt in TaskAttempt.objects.filter( status_is_running=False).filter(status_is_cleaned_up=False): task_attempt.cleanup()
[ "def", "check_for_missed_cleanup", "(", ")", ":", "if", "get_setting", "(", "'PRESERVE_ALL'", ")", ":", "return", "from", "api", ".", "models", ".", "tasks", "import", "TaskAttempt", "if", "get_setting", "(", "'PRESERVE_ON_FAILURE'", ")", ":", "for", "task_attem...
Check for TaskAttempts that were never cleaned up
[ "Check", "for", "TaskAttempts", "that", "were", "never", "cleaned", "up" ]
python
train
40.1875
AguaClara/aguaclara
aguaclara/core/physchem.py
https://github.com/AguaClara/aguaclara/blob/8dd4e734768b166a7fc2b60388a24df2f93783fc/aguaclara/core/physchem.py#L681-L685
def vel_horizontal(HeightWaterCritical): """Return the horizontal velocity.""" #Checking input validity ut.check_range([HeightWaterCritical, ">0", "Critical height of water"]) return np.sqrt(gravity.magnitude * HeightWaterCritical)
[ "def", "vel_horizontal", "(", "HeightWaterCritical", ")", ":", "#Checking input validity", "ut", ".", "check_range", "(", "[", "HeightWaterCritical", ",", "\">0\"", ",", "\"Critical height of water\"", "]", ")", "return", "np", ".", "sqrt", "(", "gravity", ".", "m...
Return the horizontal velocity.
[ "Return", "the", "horizontal", "velocity", "." ]
python
train
48.6
saltstack/salt
salt/modules/event.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/event.py#L27-L31
def _dict_subset(keys, master_dict): ''' Return a dictionary of only the subset of keys/values specified in keys ''' return dict([(k, v) for k, v in six.iteritems(master_dict) if k in keys])
[ "def", "_dict_subset", "(", "keys", ",", "master_dict", ")", ":", "return", "dict", "(", "[", "(", "k", ",", "v", ")", "for", "k", ",", "v", "in", "six", ".", "iteritems", "(", "master_dict", ")", "if", "k", "in", "keys", "]", ")" ]
Return a dictionary of only the subset of keys/values specified in keys
[ "Return", "a", "dictionary", "of", "only", "the", "subset", "of", "keys", "/", "values", "specified", "in", "keys" ]
python
train
40.4
libtcod/python-tcod
tcod/libtcodpy.py
https://github.com/libtcod/python-tcod/blob/8ba10c5cfb813eaf3e834de971ba2d6acb7838e4/tcod/libtcodpy.py#L1934-L1950
def console_list_load_xp( filename: str ) -> Optional[List[tcod.console.Console]]: """Return a list of consoles from a REXPaint `.xp` file.""" tcod_list = lib.TCOD_console_list_from_xp(filename.encode("utf-8")) if tcod_list == ffi.NULL: return None try: python_list = [] lib.TCOD_list_reverse(tcod_list) while not lib.TCOD_list_is_empty(tcod_list): python_list.append( tcod.console.Console._from_cdata(lib.TCOD_list_pop(tcod_list)) ) return python_list finally: lib.TCOD_list_delete(tcod_list)
[ "def", "console_list_load_xp", "(", "filename", ":", "str", ")", "->", "Optional", "[", "List", "[", "tcod", ".", "console", ".", "Console", "]", "]", ":", "tcod_list", "=", "lib", ".", "TCOD_console_list_from_xp", "(", "filename", ".", "encode", "(", "\"u...
Return a list of consoles from a REXPaint `.xp` file.
[ "Return", "a", "list", "of", "consoles", "from", "a", "REXPaint", ".", "xp", "file", "." ]
python
train
34.647059
ofir123/py-printer
pyprinter/printer.py
https://github.com/ofir123/py-printer/blob/876c83b32120f3b6a7b06989b2cd9b86915d1a50/pyprinter/printer.py#L181-L233
def write(self, text: str): """ Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print. """ # Default color is NORMAL. last_color = (self._DARK_CODE, 0) # We use splitlines with keepends in order to keep the line breaks. # Then we split by using the console width. original_lines = text.splitlines(True) lines = self._split_lines(original_lines) if self._width_limit else original_lines # Print the new width-formatted lines. for line in lines: # Print indents only at line beginnings. if not self._in_line: self._writer.write(' ' * self.indents_sum) # Remove colors if needed. if not self._colors: for color_code in self._ANSI_REGEXP.findall(line): line = line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') elif not self._ANSI_REGEXP.match(line): # Check if the line starts with a color. If not, we apply the color from the last line. line = self._ANSI_COLOR_CODE % (last_color[0], int(last_color[1])) + line # Print the final line. self._writer.write(line) # Update the in_line status. self._in_line = not line.endswith(self.LINE_SEP) # Update the last color used. if self._colors: last_color = self._ANSI_REGEXP.findall(line)[-1] # Update last position (if there was no line break in the end). if len(lines) > 0: last_line = lines[-1] if not last_line.endswith(self.LINE_SEP): # Strip the colors to figure out the real number of characters in the line. if self._colors: for color_code in self._ANSI_REGEXP.findall(last_line): last_line = last_line.replace(self._ANSI_COLOR_CODE % (color_code[0], int(color_code[1])), '') self._last_position += len(last_line) else: self._last_position = 0 self._is_first_line = False else: self._last_position = 0 # Reset colors for the next print. if self._colors and not text.endswith(self.NORMAL): self._writer.write(self.NORMAL)
[ "def", "write", "(", "self", ",", "text", ":", "str", ")", ":", "# Default color is NORMAL.", "last_color", "=", "(", "self", ".", "_DARK_CODE", ",", "0", ")", "# We use splitlines with keepends in order to keep the line breaks.", "# Then we split by using the console width...
Prints text to the screen. Supports colors by using the color constants. To use colors, add the color before the text you want to print. :param text: The text to print.
[ "Prints", "text", "to", "the", "screen", ".", "Supports", "colors", "by", "using", "the", "color", "constants", ".", "To", "use", "colors", "add", "the", "color", "before", "the", "text", "you", "want", "to", "print", "." ]
python
train
45.811321
cggh/scikit-allel
allel/model/ndarray.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/model/ndarray.py#L156-L173
def query(self, expression, vm='python'): """Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array """ condition = self.eval(expression, vm=vm) return self.compress(condition)
[ "def", "query", "(", "self", ",", "expression", ",", "vm", "=", "'python'", ")", ":", "condition", "=", "self", ".", "eval", "(", "expression", ",", "vm", "=", "vm", ")", "return", "self", ".", "compress", "(", "condition", ")" ]
Evaluate expression and then use it to extract rows from the table. Parameters ---------- expression : string Expression to evaluate. vm : {'numexpr', 'python'} Virtual machine to use. Returns ------- result : structured array
[ "Evaluate", "expression", "and", "then", "use", "it", "to", "extract", "rows", "from", "the", "table", "." ]
python
train
24.833333
fermiPy/fermipy
fermipy/wcs_utils.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/wcs_utils.py#L68-L91
def distance_to_edge(self, skydir): """Return the angular distance from the given direction and the edge of the projection.""" xpix, ypix = skydir.to_pixel(self.wcs, origin=0) deltax = np.array((xpix - self._pix_center[0]) * self._pix_size[0], ndmin=1) deltay = np.array((ypix - self._pix_center[1]) * self._pix_size[1], ndmin=1) deltax = np.abs(deltax) - 0.5 * self._width[0] deltay = np.abs(deltay) - 0.5 * self._width[1] m0 = (deltax < 0) & (deltay < 0) m1 = (deltax > 0) & (deltay < 0) m2 = (deltax < 0) & (deltay > 0) m3 = (deltax > 0) & (deltay > 0) mx = np.abs(deltax) <= np.abs(deltay) my = np.abs(deltay) < np.abs(deltax) delta = np.zeros(len(deltax)) delta[(m0 & mx) | (m3 & my) | m1] = deltax[(m0 & mx) | (m3 & my) | m1] delta[(m0 & my) | (m3 & mx) | m2] = deltay[(m0 & my) | (m3 & mx) | m2] return delta
[ "def", "distance_to_edge", "(", "self", ",", "skydir", ")", ":", "xpix", ",", "ypix", "=", "skydir", ".", "to_pixel", "(", "self", ".", "wcs", ",", "origin", "=", "0", ")", "deltax", "=", "np", ".", "array", "(", "(", "xpix", "-", "self", ".", "_...
Return the angular distance from the given direction and the edge of the projection.
[ "Return", "the", "angular", "distance", "from", "the", "given", "direction", "and", "the", "edge", "of", "the", "projection", "." ]
python
train
41
ampl/amplpy
amplpy/parameter.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/parameter.py#L98-L138
def setValues(self, values): """ Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter. """ if isinstance(values, dict): indices, values = list(zip(*values.items())) indices = Utils.toTupleArray(indices) if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesTaStr(indices, values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesTaDbl(indices, values, len(values)) else: raise TypeError elif isinstance(values, (list, tuple)): if any(isinstance(value, basestring) for value in values): values = list(map(str, values)) self._impl.setValuesStr(values, len(values)) elif all(isinstance(value, Real) for value in values): values = list(map(float, values)) self._impl.setValuesDbl(values, len(values)) else: raise TypeError else: if np is not None and isinstance(values, np.ndarray): self.setValues(DataFrame.fromNumpy(values).toList()) return Entity.setValues(self, values)
[ "def", "setValues", "(", "self", ",", "values", ")", ":", "if", "isinstance", "(", "values", ",", "dict", ")", ":", "indices", ",", "values", "=", "list", "(", "zip", "(", "*", "values", ".", "items", "(", ")", ")", ")", "indices", "=", "Utils", ...
Assign the values (string or float) to the parameter instances with the specified indices, equivalent to the AMPL code: .. code-block:: ampl let {i in indices} par[i] := values[i]; Args: values: list, dictionary or :class:`~amplpy.DataFrame` with the indices and the values to be set. Raises: TypeError: If called on a scalar parameter.
[ "Assign", "the", "values", "(", "string", "or", "float", ")", "to", "the", "parameter", "instances", "with", "the", "specified", "indices", "equivalent", "to", "the", "AMPL", "code", ":" ]
python
train
41.121951
proteanhq/protean
src/protean/core/queryset.py
https://github.com/proteanhq/protean/blob/0e29873f4aa634aa93cc08ed675dd749c7ed4b0f/src/protean/core/queryset.py#L243-L264
def update_all(self, *args, **kwargs): """Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value). """ updated_item_count = 0 repository = repo_factory.get_repository(self._entity_cls) try: updated_item_count = repository.update_all(self._criteria, *args, **kwargs) except Exception: # FIXME Log Exception raise return updated_item_count
[ "def", "update_all", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "updated_item_count", "=", "0", "repository", "=", "repo_factory", ".", "get_repository", "(", "self", ".", "_entity_cls", ")", "try", ":", "updated_item_count", "=", "r...
Updates all objects with details given if they match a set of conditions supplied. This method forwards filters and updates directly to the repository. It does not instantiate entities and it does not trigger Entity callbacks or validations. Update values can be specified either as a dict, or keyword arguments. Returns the number of objects matched (which may not be equal to the number of objects updated if objects rows already have the new value).
[ "Updates", "all", "objects", "with", "details", "given", "if", "they", "match", "a", "set", "of", "conditions", "supplied", "." ]
python
train
38.818182
dmlc/gluon-nlp
scripts/bert/run_pretraining_hvd.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/run_pretraining_hvd.py#L71-L204
def train(data_train, model, nsp_loss, mlm_loss, vocab_size, ctx): """Training function.""" hvd.broadcast_parameters(model.collect_params(), root_rank=0) mlm_metric = nlp.metric.MaskedAccuracy() nsp_metric = nlp.metric.MaskedAccuracy() mlm_metric.reset() nsp_metric.reset() logging.debug('Creating distributed trainer...') lr = args.lr optim_params = {'learning_rate': lr, 'epsilon': 1e-6, 'wd': 0.01} if args.dtype == 'float16': optim_params['multi_precision'] = True dynamic_loss_scale = args.dtype == 'float16' if dynamic_loss_scale: loss_scale_param = {'scale_window': 2000 / num_workers} else: loss_scale_param = None trainer = hvd.DistributedTrainer(model.collect_params(), 'bertadam', optim_params) fp16_trainer = FP16Trainer(trainer, dynamic_loss_scale=dynamic_loss_scale, loss_scaler_params=loss_scale_param) if args.ckpt_dir and args.start_step: trainer.load_states(os.path.join(args.ckpt_dir, '%07d.states'%args.start_step)) accumulate = args.accumulate num_train_steps = args.num_steps warmup_ratio = args.warmup_ratio num_warmup_steps = int(num_train_steps * warmup_ratio) params = [p for p in model.collect_params().values() if p.grad_req != 'null'] param_dict = model.collect_params() # Do not apply weight decay on LayerNorm and bias terms for _, v in model.collect_params('.*beta|.*gamma|.*bias').items(): v.wd_mult = 0.0 if accumulate > 1: for p in params: p.grad_req = 'add' train_begin_time = time.time() begin_time = time.time() running_mlm_loss, running_nsp_loss = 0, 0 running_num_tks = 0 batch_num = 0 step_num = args.start_step logging.debug('Training started') while step_num < num_train_steps: for _, dataloader in enumerate(data_train): if step_num >= num_train_steps: break # create dummy data loader if needed if args.dummy_data_len: target_shape = (args.batch_size, args.dummy_data_len) dataloader = get_dummy_dataloader(dataloader, target_shape) for _, data_batch in enumerate(dataloader): if step_num >= num_train_steps: break if batch_num % accumulate == 0: 
step_num += 1 # if accumulate > 1, grad_req is set to 'add', and zero_grad is required if accumulate > 1: param_dict.zero_grad() # update learning rate if step_num <= num_warmup_steps: new_lr = lr * step_num / num_warmup_steps else: offset = lr * step_num / num_train_steps new_lr = lr - offset trainer.set_learning_rate(new_lr) if args.profile: profile(step_num, 10, 14, profile_name=args.profile + str(rank)) # load data if args.use_avg_len: data_list = [[seq.as_in_context(context) for seq in shard] for context, shard in zip([ctx], data_batch)] else: data_list = list(split_and_load(data_batch, [ctx])) data = data_list[0] # forward with mx.autograd.record(): (ls, ns_label, classified, masked_id, decoded, \ masked_weight, ls1, ls2, valid_len) = forward(data, model, mlm_loss, nsp_loss, vocab_size, args.dtype) ls = ls / accumulate # backward if args.dtype == 'float16': fp16_trainer.backward(ls) else: ls.backward() running_mlm_loss += ls1.as_in_context(mx.cpu()) running_nsp_loss += ls2.as_in_context(mx.cpu()) running_num_tks += valid_len.sum().as_in_context(mx.cpu()) # update if (batch_num + 1) % accumulate == 0: # step() performs 3 things: # 1. allreduce gradients from all workers # 2. checking the global_norm of gradients and clip them if necessary # 3. 
averaging the gradients and apply updates fp16_trainer.step(1, max_norm=1*num_workers) nsp_metric.update([ns_label], [classified]) mlm_metric.update([masked_id], [decoded], [masked_weight]) # logging if (step_num + 1) % (args.log_interval) == 0 and (batch_num + 1) % accumulate == 0: log(begin_time, running_num_tks, running_mlm_loss / accumulate, running_nsp_loss / accumulate, step_num, mlm_metric, nsp_metric, trainer, args.log_interval) begin_time = time.time() running_mlm_loss = running_nsp_loss = running_num_tks = 0 mlm_metric.reset_local() nsp_metric.reset_local() # saving checkpoints if args.ckpt_dir and (step_num + 1) % (args.ckpt_interval) == 0 \ and (batch_num + 1) % accumulate == 0 and local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) batch_num += 1 if local_rank == 0: save_params(step_num, model, trainer, args.ckpt_dir) mx.nd.waitall() train_end_time = time.time() logging.info('Train cost={:.1f}s'.format(train_end_time - train_begin_time))
[ "def", "train", "(", "data_train", ",", "model", ",", "nsp_loss", ",", "mlm_loss", ",", "vocab_size", ",", "ctx", ")", ":", "hvd", ".", "broadcast_parameters", "(", "model", ".", "collect_params", "(", ")", ",", "root_rank", "=", "0", ")", "mlm_metric", ...
Training function.
[ "Training", "function", "." ]
python
train
42.507463
secdev/scapy
scapy/layers/l2.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/l2.py#L70-L102
def getmacbyip(ip, chainCC=0): """Return MAC address corresponding to a given IP address""" if isinstance(ip, Net): ip = next(iter(ip)) ip = inet_ntoa(inet_aton(ip or "0.0.0.0")) tmp = [orb(e) for e in inet_aton(ip)] if (tmp[0] & 0xf0) == 0xe0: # mcast @ return "01:00:5e:%.2x:%.2x:%.2x" % (tmp[1] & 0x7f, tmp[2], tmp[3]) iff, _, gw = conf.route.route(ip) if ((iff == consts.LOOPBACK_INTERFACE) or (ip == conf.route.get_if_bcast(iff))): # noqa: E501 return "ff:ff:ff:ff:ff:ff" if gw != "0.0.0.0": ip = gw mac = conf.netcache.arp_cache.get(ip) if mac: return mac try: res = srp1(Ether(dst=ETHER_BROADCAST) / ARP(op="who-has", pdst=ip), type=ETH_P_ARP, iface=iff, timeout=2, verbose=0, chainCC=chainCC, nofilter=1) except Exception: return None if res is not None: mac = res.payload.hwsrc conf.netcache.arp_cache[ip] = mac return mac return None
[ "def", "getmacbyip", "(", "ip", ",", "chainCC", "=", "0", ")", ":", "if", "isinstance", "(", "ip", ",", "Net", ")", ":", "ip", "=", "next", "(", "iter", "(", "ip", ")", ")", "ip", "=", "inet_ntoa", "(", "inet_aton", "(", "ip", "or", "\"0.0.0.0\""...
Return MAC address corresponding to a given IP address
[ "Return", "MAC", "address", "corresponding", "to", "a", "given", "IP", "address" ]
python
train
32.272727
blockstack/blockstack-core
blockstack/lib/client.py
https://github.com/blockstack/blockstack-core/blob/1dcfdd39b152d29ce13e736a6a1a0981401a0505/blockstack/lib/client.py#L3100-L3138
def get_name_history(name, hostport=None, proxy=None, history_page=None): """ Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error """ assert hostport or proxy, 'Need hostport or proxy' if proxy is None: proxy = connect_hostport(hostport) hist = {} indexing = None lastblock = None if history_page != None: resp = get_name_history_page(name, history_page, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] return {'status': True, 'history': resp['history'], 'indexing': indexing, 'lastblock': lastblock} for i in range(0, 100000000): # this is obviously too big resp = get_name_history_page(name, i, proxy=proxy) if 'error' in resp: return resp indexing = resp['indexing'] lastblock = resp['lastblock'] if len(resp['history']) == 0: # caught up break hist = name_history_merge(hist, resp['history']) return {'status': True, 'history': hist, 'indexing': indexing, 'lastblock': lastblock}
[ "def", "get_name_history", "(", "name", ",", "hostport", "=", "None", ",", "proxy", "=", "None", ",", "history_page", "=", "None", ")", ":", "assert", "hostport", "or", "proxy", ",", "'Need hostport or proxy'", "if", "proxy", "is", "None", ":", "proxy", "=...
Get the full history of a name Returns {'status': True, 'history': ...} on success, where history is grouped by block Returns {'error': ...} on error
[ "Get", "the", "full", "history", "of", "a", "name", "Returns", "{", "status", ":", "True", "history", ":", "...", "}", "on", "success", "where", "history", "is", "grouped", "by", "block", "Returns", "{", "error", ":", "...", "}", "on", "error" ]
python
train
31.025641
oceanprotocol/oceandb-elasticsearch-driver
oceandb_elasticsearch_driver/plugin.py
https://github.com/oceanprotocol/oceandb-elasticsearch-driver/blob/11901e8396252b9dbb70fd48debcfa82f1dd1ff2/oceandb_elasticsearch_driver/plugin.py#L136-L170
def query(self, search_model: QueryModel): """Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query. """ query_parsed = query_parser(search_model.query) self.logger.debug(f'elasticsearch::query::{query_parsed[0]}') if search_model.sort is not None: self._mapping_to_sort(search_model.sort.keys()) sort = self._sort_object(search_model.sort) else: sort = [{"_id": "asc"}] if search_model.query == {}: query = {'match_all': {}} else: query = query_parsed[0] body = { 'query': query, 'sort': sort, 'from': (search_model.page - 1) * search_model.offset, 'size': search_model.offset, } page = self.driver._es.search( index=self.driver._index, doc_type='_doc', body=body, q=query_parsed[1] ) object_list = [] for x in page['hits']['hits']: object_list.append(x['_source']) return object_list
[ "def", "query", "(", "self", ",", "search_model", ":", "QueryModel", ")", ":", "query_parsed", "=", "query_parser", "(", "search_model", ".", "query", ")", "self", ".", "logger", ".", "debug", "(", "f'elasticsearch::query::{query_parsed[0]}'", ")", "if", "search...
Query elasticsearch for objects. :param search_model: object of QueryModel. :return: list of objects that match the query.
[ "Query", "elasticsearch", "for", "objects", ".", ":", "param", "search_model", ":", "object", "of", "QueryModel", ".", ":", "return", ":", "list", "of", "objects", "that", "match", "the", "query", "." ]
python
test
32.171429
mitsei/dlkit
dlkit/json_/osid/metadata.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/osid/metadata.py#L578-L593
def supports_calendar_type(self, calendar_type): """Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.Metadata.supports_coordinate_type if self._kwargs['syntax'] not in ['``DATETIME``', '``DURATION``']: raise errors.IllegalState() return calendar_type in self.get_calendar_types
[ "def", "supports_calendar_type", "(", "self", ",", "calendar_type", ")", ":", "# Implemented from template for osid.Metadata.supports_coordinate_type", "if", "self", ".", "_kwargs", "[", "'syntax'", "]", "not", "in", "[", "'``DATETIME``'", ",", "'``DURATION``'", "]", ":...
Tests if the given calendar type is supported. arg: calendar_type (osid.type.Type): a calendar Type return: (boolean) - ``true`` if the type is supported, ``false`` otherwise raise: IllegalState - syntax is not a ``DATETIME`` or ``DURATION`` raise: NullArgument - ``calendar_type`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Tests", "if", "the", "given", "calendar", "type", "is", "supported", "." ]
python
train
46.1875
numenta/nupic
src/nupic/datafiles/extra/gym/raw/makeDataset.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/datafiles/extra/gym/raw/makeDataset.py#L124-L156
def processClubAttendance(f, clubs): """Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process. """ try: # Skip as many empty lines as necessary (file format inconsistent) line = f.next() while line == ',,,,,,,,,,,,,,,,,,,\n': line = f.next() # The first non-empty line should have the name as the first field name = line.split(',')[0] # Create a new club object if needed if name not in clubs: clubs[name] = Club(name) # Get the named club c = clubs[name] c.processAttendance(f) return True except StopIteration: return False
[ "def", "processClubAttendance", "(", "f", ",", "clubs", ")", ":", "try", ":", "# Skip as many empty lines as necessary (file format inconsistent)", "line", "=", "f", ".", "next", "(", ")", "while", "line", "==", "',,,,,,,,,,,,,,,,,,,\\n'", ":", "line", "=", "f", "...
Process the attendance data of one club If the club already exists in the list update its data. If the club is new create a new Club object and add it to the dict The next step is to iterate over all the lines and add a record for each line. When reaching an empty line it means there are no more records for this club. Along the way some redundant lines are skipped. When the file ends the f.next() call raises a StopIteration exception and that's the sign to return False, which indicates to the caller that there are no more clubs to process.
[ "Process", "the", "attendance", "data", "of", "one", "club", "If", "the", "club", "already", "exists", "in", "the", "list", "update", "its", "data", ".", "If", "the", "club", "is", "new", "create", "a", "new", "Club", "object", "and", "add", "it", "to"...
python
valid
33.363636
ssato/python-anytemplate
anytemplate/engines/cheetah.py
https://github.com/ssato/python-anytemplate/blob/3e56baa914bd47f044083b20e33100f836443596/anytemplate/engines/cheetah.py#L68-L76
def supports(cls, template_file=None): """ :return: Whether the engine can process given template file or not. """ if anytemplate.compat.IS_PYTHON_3: cls._priority = 99 return False # Always as it's not ported to python 3. return super(Engine, cls).supports(template_file=template_file)
[ "def", "supports", "(", "cls", ",", "template_file", "=", "None", ")", ":", "if", "anytemplate", ".", "compat", ".", "IS_PYTHON_3", ":", "cls", ".", "_priority", "=", "99", "return", "False", "# Always as it's not ported to python 3.", "return", "super", "(", ...
:return: Whether the engine can process given template file or not.
[ ":", "return", ":", "Whether", "the", "engine", "can", "process", "given", "template", "file", "or", "not", "." ]
python
train
38.222222
petl-developers/petl
petl/transform/intervals.py
https://github.com/petl-developers/petl/blob/1d33ca055f7e04e0d28a772041c9fd30c8d415d6/petl/transform/intervals.py#L91-L108
def facetrecordtrees(table, key, start='start', stop='stop'): """ Construct faceted interval trees for the given table, where each node in the tree is a record. """ import intervaltree getstart = attrgetter(start) getstop = attrgetter(stop) getkey = attrgetter(key) trees = dict() for rec in records(table): k = getkey(rec) if k not in trees: trees[k] = intervaltree.IntervalTree() trees[k].addi(getstart(rec), getstop(rec), rec) return trees
[ "def", "facetrecordtrees", "(", "table", ",", "key", ",", "start", "=", "'start'", ",", "stop", "=", "'stop'", ")", ":", "import", "intervaltree", "getstart", "=", "attrgetter", "(", "start", ")", "getstop", "=", "attrgetter", "(", "stop", ")", "getkey", ...
Construct faceted interval trees for the given table, where each node in the tree is a record.
[ "Construct", "faceted", "interval", "trees", "for", "the", "given", "table", "where", "each", "node", "in", "the", "tree", "is", "a", "record", "." ]
python
train
28.222222
niolabs/python-xbee
xbee/thread/base.py
https://github.com/niolabs/python-xbee/blob/b91be3d0ee7ccaa1990120b5b5490999d8e6cbc7/xbee/thread/base.py#L67-L77
def halt(self): """ halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning. """ if self._callback: self._thread_continue = False self._thread.join()
[ "def", "halt", "(", "self", ")", ":", "if", "self", ".", "_callback", ":", "self", ".", "_thread_continue", "=", "False", "self", ".", "_thread", ".", "join", "(", ")" ]
halt: None -> None If this instance has a separate thread running, it will be halted. This method will wait until the thread has cleaned up before returning.
[ "halt", ":", "None", "-", ">", "None" ]
python
train
29.181818
twilio/twilio-python
twilio/rest/api/v2010/account/available_phone_number/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/available_phone_number/__init__.py#L410-L424
def _proxy(self): """ Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext """ if self._context is None: self._context = AvailablePhoneNumberCountryContext( self._version, account_sid=self._solution['account_sid'], country_code=self._solution['country_code'], ) return self._context
[ "def", "_proxy", "(", "self", ")", ":", "if", "self", ".", "_context", "is", "None", ":", "self", ".", "_context", "=", "AvailablePhoneNumberCountryContext", "(", "self", ".", "_version", ",", "account_sid", "=", "self", ".", "_solution", "[", "'account_sid'...
Generate an instance context for the instance, the context is capable of performing various actions. All instance actions are proxied to the context :returns: AvailablePhoneNumberCountryContext for this AvailablePhoneNumberCountryInstance :rtype: twilio.rest.api.v2010.account.available_phone_number.AvailablePhoneNumberCountryContext
[ "Generate", "an", "instance", "context", "for", "the", "instance", "the", "context", "is", "capable", "of", "performing", "various", "actions", ".", "All", "instance", "actions", "are", "proxied", "to", "the", "context" ]
python
train
45.866667
kamikaze/webdav
src/webdav/client.py
https://github.com/kamikaze/webdav/blob/6facff7224023d3e28c8e1592f3c58401c91a0e6/src/webdav/client.py#L554-L570
def copy(self, remote_path_from, remote_path_to): """Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied. """ urn_from = Urn(remote_path_from) if not self.check(urn_from.path()): raise RemoteResourceNotFound(urn_from.path()) urn_to = Urn(remote_path_to) if not self.check(urn_to.parent()): raise RemoteParentNotFound(urn_to.path()) header_destination = f'Destination: {self.get_full_path(urn_to)}' self.execute_request(action='copy', path=urn_from.quote(), headers_ext=[header_destination])
[ "def", "copy", "(", "self", ",", "remote_path_from", ",", "remote_path_to", ")", ":", "urn_from", "=", "Urn", "(", "remote_path_from", ")", "if", "not", "self", ".", "check", "(", "urn_from", ".", "path", "(", ")", ")", ":", "raise", "RemoteResourceNotFoun...
Copies resource from one place to another on WebDAV server. More information you can find by link http://webdav.org/specs/rfc4918.html#METHOD_COPY :param remote_path_from: the path to resource which will be copied, :param remote_path_to: the path where resource will be copied.
[ "Copies", "resource", "from", "one", "place", "to", "another", "on", "WebDAV", "server", ".", "More", "information", "you", "can", "find", "by", "link", "http", ":", "//", "webdav", ".", "org", "/", "specs", "/", "rfc4918", ".", "html#METHOD_COPY" ]
python
train
47.882353
django-userena-ce/django-userena-ce
userena/forms.py
https://github.com/django-userena-ce/django-userena-ce/blob/2d8b745eed25128134e961ca96c270802e730256/userena/forms.py#L110-L119
def save(self): """ Generate a random username before falling back to parent signup form """ while True: username = sha1(str(random.random()).encode('utf-8')).hexdigest()[:5] try: get_user_model().objects.get(username__iexact=username) except get_user_model().DoesNotExist: break self.cleaned_data['username'] = username return super(SignupFormOnlyEmail, self).save()
[ "def", "save", "(", "self", ")", ":", "while", "True", ":", "username", "=", "sha1", "(", "str", "(", "random", ".", "random", "(", ")", ")", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")", "[", ":", "5", "]", "try", ":", ...
Generate a random username before falling back to parent signup form
[ "Generate", "a", "random", "username", "before", "falling", "back", "to", "parent", "signup", "form" ]
python
train
44.3
singularityhub/sregistry-cli
sregistry/main/base/settings.py
https://github.com/singularityhub/sregistry-cli/blob/abc96140a1d15b5e96d83432e1e0e1f4f8f36331/sregistry/main/base/settings.py#L62-L89
def get_and_update_setting(self, name, default=None):
    '''Resolve a setting and sync it back to the client secrets cache.

    Resolution order: environment (inside ``self._get_setting``) first,
    then the settings-file cache, then ``default``. Whatever non-None
    value wins is written back to the client secrets so later calls hit
    the cache. A return of ``None`` means the setting is unset everywhere.
    '''
    value = self._get_setting(name)
    if value is None and default is not None:
        value = default

    if value is not None:
        # Persist the resolved value so the settings file stays current.
        update_client_secrets(backend=self.client_name,
                              updates={name: value})

    return value
[ "def", "get_and_update_setting", "(", "self", ",", "name", ",", "default", "=", "None", ")", ":", "setting", "=", "self", ".", "_get_setting", "(", "name", ")", "if", "setting", "is", "None", "and", "default", "is", "not", "None", ":", "setting", "=", ...
Look for a setting in the environment (first priority) and then the settings file (second). If something is found, the settings file is updated. The order of operations works as follows: 1. The .sregistry settings file is used as a cache for the variable 2. the environment variable always takes priority to cache, and if found, will update the cache. 3. If the variable is not found and the cache is set, we are good 5. If the variable is not found and the cache isn't set, return default (default is None) So the user of the function can assume a return of None equates to not set anywhere, and take the appropriate action.
[ "Look", "for", "a", "setting", "in", "the", "environment", "(", "first", "priority", ")", "and", "then", "the", "settings", "file", "(", "second", ")", ".", "If", "something", "is", "found", "the", "settings", "file", "is", "updated", ".", "The", "order"...
python
test
39.464286
aio-libs/aioredis
aioredis/commands/sorted_set.py
https://github.com/aio-libs/aioredis/blob/e8c33e39558d4cc91cf70dde490d8b330c97dc2e/aioredis/commands/sorted_set.py#L51-L66
def zcount(self, key, min=float('-inf'), max=float('inf'), *, exclude=None):
    """Count the members of a sorted set with scores inside [min, max].

    :raises TypeError: if min or max is not float or int
    :raises ValueError: if min is greater than max
    """
    # Validate both bounds before talking to the server.
    for bound, label in ((min, 'min'), (max, 'max')):
        if not isinstance(bound, (int, float)):
            raise TypeError('{} argument must be int or float'.format(label))
    if min > max:
        raise ValueError('min could not be greater than max')
    score_range = _encode_min_max(exclude, min, max)
    return self.execute(b'ZCOUNT', key, *score_range)
[ "def", "zcount", "(", "self", ",", "key", ",", "min", "=", "float", "(", "'-inf'", ")", ",", "max", "=", "float", "(", "'inf'", ")", ",", "*", ",", "exclude", "=", "None", ")", ":", "if", "not", "isinstance", "(", "min", ",", "(", "int", ",", ...
Count the members in a sorted set with scores within the given values. :raises TypeError: min or max is not float or int :raises ValueError: if min greater than max
[ "Count", "the", "members", "in", "a", "sorted", "set", "with", "scores", "within", "the", "given", "values", "." ]
python
train
44.25
Accelize/pycosio
pycosio/storage/azure.py
https://github.com/Accelize/pycosio/blob/1cc1f8fdf5394d92918b7bae2bfa682169ccc48c/pycosio/storage/azure.py#L199-L217
def _model_to_dict(obj):
    """
    Convert an Azure object model into a plain dict.

    Args:
        obj: Object model.

    Returns:
        dict: Converted model.
    """
    result = _properties_model_to_dict(obj.properties)
    # Copy the optional attributes only when present and truthy.
    for key in ('metadata', 'snapshot'):
        value = getattr(obj, key, None)
        if value:
            result[key] = value
    return result
[ "def", "_model_to_dict", "(", "obj", ")", ":", "result", "=", "_properties_model_to_dict", "(", "obj", ".", "properties", ")", "for", "attribute", "in", "(", "'metadata'", ",", "'snapshot'", ")", ":", "try", ":", "value", "=", "getattr", "(", "obj", ",", ...
Convert object model to dict. Args: obj: Object model. Returns: dict: Converted model.
[ "Convert", "object", "model", "to", "dict", "." ]
python
train
25.684211
cmcginty/PyWeather
weather/units/temp.py
https://github.com/cmcginty/PyWeather/blob/8c25d9cd1fa921e0a6e460d523656279cac045cb/weather/units/temp.py#L101-L115
def calc_humidity(temp, dewpoint):
    '''Relative humidity from temperature and dew point (Fahrenheit inputs).

    Uses the weatherwise.org formulation: both inputs are converted to
    Celsius, then RH = ((112 - 0.1*T + Td) / (112 + 0.9*T)) ** 8.
    '''
    t_c = fahrenheit_to_celsius(temp)
    td_c = fahrenheit_to_celsius(dewpoint)
    ratio = (112 - (0.1 * t_c) + td_c) / (112 + (0.9 * t_c))
    return math.pow(ratio, 8)
[ "def", "calc_humidity", "(", "temp", ",", "dewpoint", ")", ":", "t", "=", "fahrenheit_to_celsius", "(", "temp", ")", "td", "=", "fahrenheit_to_celsius", "(", "dewpoint", ")", "num", "=", "112", "-", "(", "0.1", "*", "t", ")", "+", "td", "denom", "=", ...
calculates the humidity via the formula from weatherwise.org return the relative humidity
[ "calculates", "the", "humidity", "via", "the", "formula", "from", "weatherwise", ".", "org", "return", "the", "relative", "humidity" ]
python
test
21.866667
materialsproject/pymatgen
pymatgen/core/tensors.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L315-L326
def voigt_symmetrized(self):
    """
    Return a "voigt"-symmetrized tensor, i.e. a voigt-notation tensor
    that is invariant with respect to permutation of its indices.
    """
    if self.rank < 2 or self.rank % 2 != 0:
        raise ValueError("V-symmetrization requires rank even and >= 2")

    v = self.voigt
    axis_perms = list(itertools.permutations(range(v.ndim)))
    # Average the tensor over every axis permutation.
    symmetrized = sum(np.transpose(v, p) for p in axis_perms) / len(axis_perms)
    return self.__class__.from_voigt(symmetrized)
[ "def", "voigt_symmetrized", "(", "self", ")", ":", "if", "not", "(", "self", ".", "rank", "%", "2", "==", "0", "and", "self", ".", "rank", ">=", "2", ")", ":", "raise", "ValueError", "(", "\"V-symmetrization requires rank even and >= 2\"", ")", "v", "=", ...
Returns a "voigt"-symmetrized tensor, i. e. a voigt-notation tensor such that it is invariant wrt permutation of indices
[ "Returns", "a", "voigt", "-", "symmetrized", "tensor", "i", ".", "e", ".", "a", "voigt", "-", "notation", "tensor", "such", "that", "it", "is", "invariant", "wrt", "permutation", "of", "indices" ]
python
train
43.583333
jmcgeheeiv/pyfakefs
pyfakefs/fake_filesystem.py
https://github.com/jmcgeheeiv/pyfakefs/blob/6c36fb8987108107fc861fc3013620d46c7d2f9c/pyfakefs/fake_filesystem.py#L3795-L3811
def umask(self, new_mask):
    """Change the current umask and return the previous one.

    Args:
        new_mask: (int) The new umask value.

    Returns:
        The old umask.

    Raises:
        TypeError: if new_mask is of an invalid type.
    """
    if not is_int_type(new_mask):
        raise TypeError('an integer is required')
    previous = self.filesystem.umask
    self.filesystem.umask = new_mask
    return previous
[ "def", "umask", "(", "self", ",", "new_mask", ")", ":", "if", "not", "is_int_type", "(", "new_mask", ")", ":", "raise", "TypeError", "(", "'an integer is required'", ")", "old_umask", "=", "self", ".", "filesystem", ".", "umask", "self", ".", "filesystem", ...
Change the current umask. Args: new_mask: (int) The new umask value. Returns: The old umask. Raises: TypeError: if new_mask is of an invalid type.
[ "Change", "the", "current", "umask", "." ]
python
train
26.058824
UCL-INGI/INGInious
inginious/frontend/pages/aggregation.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/aggregation.py#L21-L115
def GET_AUTH(self, courseid):  # pylint: disable=arguments-differ
    """ GET request: handles group/team (un)registration actions passed as
    query parameters, then renders the classroom or team page. """
    course = self.course_factory.get_course(courseid)
    username = self.user_manager.session_username()

    error = False
    change = False
    msg = ""
    data = web.input()

    if self.user_manager.has_staff_rights_on_course(course):
        # Staff members manage groups elsewhere; this page is student-only.
        raise web.notfound()
    elif not self.user_manager.course_is_open_to_user(course, lti=False):
        return self.template_helper.get_renderer().course_unavailable()
    elif "register_group" in data:
        change = True
        if course.can_students_choose_group() and course.use_classrooms():
            # Classroom mode: move the student into the requested group of
            # their current classroom aggregation.
            aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username})

            if int(data["register_group"]) >= 0 and (len(aggregation["groups"]) > int(data["register_group"])):
                group = aggregation["groups"][int(data["register_group"])]
                if group["size"] > len(group["students"]):
                    # Room left: remove the student from any previous group,
                    # then append them to the chosen one.
                    for index, group in enumerate(aggregation["groups"]):
                        if username in group["students"]:
                            aggregation["groups"][index]["students"].remove(username)
                    aggregation["groups"][int(data["register_group"])]["students"].append(username)
                self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)
                self._logger.info("User %s registered to group %s/%s/%s", username, courseid, aggregation["description"], data["register_group"])
            else:
                error = True
                msg = _("Couldn't register to the specified group.")
        elif course.can_students_choose_group():
            # Team mode: leave the previous aggregation (if any), then join
            # the aggregation whose ObjectId was given in the request.
            aggregation = self.database.aggregations.find_one(
                {"courseid": course.get_id(), "students": username})

            if aggregation is not None:
                aggregation["students"].remove(username)

                for index, group in enumerate(aggregation["groups"]):
                    if username in group["students"]:
                        aggregation["groups"][index]["students"].remove(username)

                self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)

            # Add student in the classroom and unique group
            self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"students": username}})
            new_aggregation = self.database.aggregations.find_one_and_update({"_id": ObjectId(data["register_group"])}, {"$push": {"groups.0.students": username}})

            if new_aggregation is None:
                error = True
                msg = _("Couldn't register to the specified group.")
            else:
                self._logger.info("User %s registered to team %s/%s", username, courseid, aggregation["description"])
        else:
            error = True
            msg = _("You are not allowed to change group.")
    elif "unregister_group" in data:
        change = True
        if course.can_students_choose_group():
            # Remove the student from whichever group currently lists them.
            aggregation = self.database.aggregations.find_one({"courseid": course.get_id(), "students": username, "groups.students": username})
            if aggregation is not None:
                for index, group in enumerate(aggregation["groups"]):
                    if username in group["students"]:
                        aggregation["groups"][index]["students"].remove(username)
                self.database.aggregations.replace_one({"courseid": course.get_id(), "students": username}, aggregation)
                self._logger.info("User %s unregistered from group/team %s/%s", username, courseid, aggregation["description"])
            else:
                error = True
                msg = _("You're not registered in a group.")
        else:
            error = True
            msg = _("You are not allowed to change group.")

    # Gather recent activity and membership info for rendering.
    tasks = course.get_tasks()
    last_submissions = self.submission_manager.get_user_last_submissions(5, {"courseid": courseid, "taskid": {"$in": list(tasks.keys())}})
    for submission in last_submissions:
        submission["taskname"] = tasks[submission['taskid']].get_name(self.user_manager.session_language())

    aggregation = self.user_manager.get_course_user_aggregation(course)
    aggregations = self.user_manager.get_course_aggregations(course)
    users = self.user_manager.get_users_info(self.user_manager.get_course_registered_users(course))

    if course.use_classrooms():
        # Find the (1-based) group the current user belongs to, if any.
        mygroup = None
        for index, group in enumerate(aggregation["groups"]):
            if self.user_manager.session_username() in group["students"]:
                mygroup = group
                mygroup["index"] = index + 1

        return self.template_helper.get_renderer().classroom(course, last_submissions, aggregation, users,
                                                             mygroup, msg, error, change)
    else:
        return self.template_helper.get_renderer().team(course, last_submissions, aggregations, users,
                                                        aggregation, msg, error)
[ "def", "GET_AUTH", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", "=", "self", ".", "course_factory", ".", "get_course", "(", "courseid", ")", "username", "=", "self", ".", "user_manager", ".", "session_username", "(", ")...
GET request
[ "GET", "request" ]
python
train
58.8
numenta/htmresearch
htmresearch/algorithms/apical_dependent_temporal_memory.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/algorithms/apical_dependent_temporal_memory.py#L530-L552
def _learnOnNewSegments(connections, rng, newSegmentCells, growthCandidates, initialPermanence, sampleSize, maxSynapsesPerSegment): """ Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array) """ numNewSynapses = len(growthCandidates) if sampleSize != -1: numNewSynapses = min(numNewSynapses, sampleSize) if maxSynapsesPerSegment != -1: numNewSynapses = min(numNewSynapses, maxSynapsesPerSegment) newSegments = connections.createSegments(newSegmentCells) connections.growSynapsesToSample(newSegments, growthCandidates, numNewSynapses, initialPermanence, rng)
[ "def", "_learnOnNewSegments", "(", "connections", ",", "rng", ",", "newSegmentCells", ",", "growthCandidates", ",", "initialPermanence", ",", "sampleSize", ",", "maxSynapsesPerSegment", ")", ":", "numNewSynapses", "=", "len", "(", "growthCandidates", ")", "if", "sam...
Create new segments, and grow synapses on them. @param connections (SparseMatrixConnections) @param rng (Random) @param newSegmentCells (numpy array) @param growthCandidates (numpy array)
[ "Create", "new", "segments", "and", "grow", "synapses", "on", "them", "." ]
python
train
36.217391
sci-bots/svg-model
svg_model/plot.py
https://github.com/sci-bots/svg-model/blob/2d119650f995e62b29ce0b3151a23f3b957cb072/svg_model/plot.py#L138-L169
def plot_color_map_bars(values, vmin=None, vmax=None, color_map=None,
                        axis=None, **kwargs):
    '''
    Plot bar for each value in `values`, colored based on values mapped
    onto the specified color map.

    Args
    ----
    values (pandas.Series) : Numeric values to plot one bar per value.
    axis : A matplotlib axis.  If `None`, an axis is created.
    vmin : Minimum value to clip values at.  If `None`, uses the minimum
        of `values`.
    vmax : Maximum value to clip values at.  If `None`, uses the maximum
        of `values`.
    color_map : A matplotlib color map (see `matplotlib.cm`).
    **kwargs : Extra keyword arguments to pass to `values.plot`.

    Returns
    -------
    (axis) : Bar plot axis.
    '''
    if axis is None:
        fig, axis = plt.subplots()

    # Use explicit ``is None`` checks rather than ``vmin or min(values)``:
    # the boolean-or form silently discarded a caller-supplied limit of 0.
    if vmin is None:
        vmin = min(values)
    if vmax is None:
        vmax = max(values)
    norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax, clip=True)

    if color_map is None:
        color_map = mpl.rcParams['image.cmap']
    # Map normalized values to RGBA colors, one per bar.
    colors = color_map(norm(values.values).filled())

    values.plot(kind='bar', ax=axis, color=colors, **kwargs)
    return axis
[ "def", "plot_color_map_bars", "(", "values", ",", "vmin", "=", "None", ",", "vmax", "=", "None", ",", "color_map", "=", "None", ",", "axis", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "axis", "is", "None", ":", "fig", ",", "axis", "=", ...
Plot bar for each value in `values`, colored based on values mapped onto the specified color map. Args ---- values (pandas.Series) : Numeric values to plot one bar per value. axis : A matplotlib axis. If `None`, an axis is created. vmin : Minimum value to clip values at. vmax : Maximum value to clip values at. color_map : A matplotlib color map (see `matplotlib.cm`). **kwargs : Extra keyword arguments to pass to `values.plot`. Returns ------- (axis) : Bar plot axis.
[ "Plot", "bar", "for", "each", "value", "in", "values", "colored", "based", "on", "values", "mapped", "onto", "the", "specified", "color", "map", "." ]
python
train
32.625
kristianfoerster/melodist
melodist/temperature.py
https://github.com/kristianfoerster/melodist/blob/ddc155c77b65f791be0021dbbaf68c6bac42ecbd/melodist/temperature.py#L33-L212
def disaggregate_temperature(data_daily, method='sine_min_max', min_max_time='fix',
                             mod_nighttime=False, max_delta=None, mean_course=None,
                             sun_times=None):
    """The disaggregation function for temperature

    Parameters
    ----------
    data_daily : daily data (expected to provide ``tmin``, ``tmax`` and
        ``temp`` columns -- TODO confirm against callers)
    method : method to disaggregate ('sine_min_max', 'sine_mean', 'sine',
        'mean_course_min_max' or 'mean_course_mean')
    min_max_time : "fix" - min/max temperature at fixed times 7h/14h,
        "sun_loc" - min/max calculated by sunrise/sunnoon + 2h,
        "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift
    mod_nighttime : if True, fit three separate cosine segments
        (before-minimum / between / after-maximum) instead of one
    max_delta : maximum monthly temperature shift as returned by get_shift_by_data()
    mean_course : mean diurnal course used by the 'mean_course_*' methods
        (indexed so that ``unstack().loc[(month, hour)]`` works)
    sun_times : times of sunrise/noon as returned by get_sun_times()

    Returns
    -------
    pd.Series with hourly index containing the disaggregated temperature
    """
    if method not in (
        'sine_min_max',
        'sine_mean',
        'sine',
        'mean_course_min_max',
        'mean_course_mean',
    ):
        raise ValueError('Invalid option')

    temp_disagg = pd.Series(index=melodist.util.hourly_index(data_daily.index))

    if method in ('sine_min_max', 'sine_mean', 'sine'):
        # for this option assume time of minimum and maximum and fit cosine function through minimum and maximum temperatures
        hours_per_day = 24
        default_shift_hours = 2
        daylength_thres = 3
        # min / max hour during polar night assumption
        min_loc_polar = 6
        max_loc_polar = 18

        # Per-day table holding the hour of min/max and the min/max/mean
        # values of the previous, current and next day.
        locdf = pd.DataFrame(
            index=data_daily.index,
            columns=[
                'min_loc',
                'max_loc',
                'min_val_before',
                'min_val_cur',
                'min_val_next',
                'max_val_before',
                'max_val_cur',
                'max_val_next',
                'mean_val_cur',
            ]
        )

        if min_max_time == 'fix':
            # take fixed location for minimum and maximum
            locdf.min_loc = 7
            locdf.max_loc = 14
        elif min_max_time == 'sun_loc':
            # take location for minimum and maximum by sunrise / sunnoon + 2h
            locdf.min_loc = sun_times.sunrise.round()  # sun rise round to full hour
            locdf.max_loc = sun_times.sunnoon.round() + default_shift_hours  # sun noon round to full hour + fix 2h
        elif min_max_time == 'sun_loc_shift':
            # take location for minimum and maximum by sunrise / sunnoon + monthly delta
            locdf.min_loc = sun_times.sunrise.round()  # sun rise round to full hour
            locdf.max_loc = (sun_times.sunnoon + max_delta[locdf.index.month].values).round()  # sun noon + shift derived from observed hourly data, round to full hour

            # If the shift pushed the maximum before the minimum, fall back
            # to the standard +2h shift for those days.
            pos = locdf.min_loc > locdf.max_loc
            locdf.loc[pos, 'max_loc'] = sun_times.sunnoon[pos].round() + default_shift_hours  # standard shift in this case

        locdf.min_loc = locdf.min_loc.astype(int)
        locdf.max_loc = locdf.max_loc.astype(int)

        locdf.min_val_cur = data_daily.tmin
        locdf.max_val_cur = data_daily.tmax
        locdf.mean_val_cur = data_daily.temp
        locdf.min_val_next = data_daily.tmin.shift(-1, 'D')
        locdf.max_val_next = data_daily.tmax.shift(-1, 'D')
        # First/last days have no neighbor; reuse the current day's values.
        locdf.loc[locdf.index[-1], 'min_val_next'] = locdf.min_val_cur.iloc[-1]
        locdf.loc[locdf.index[-1], 'max_val_next'] = locdf.max_val_cur.iloc[-1]
        locdf.min_val_before = data_daily.tmin.shift(1, 'D')
        locdf.max_val_before = data_daily.tmax.shift(1, 'D')
        locdf.loc[locdf.index[0], 'min_val_before'] = locdf.min_val_cur.iloc[0]
        locdf.loc[locdf.index[0], 'max_val_before'] = locdf.max_val_cur.iloc[0]

        locdf_day = locdf
        # Forward-fill the per-day table onto the hourly index.
        locdf = locdf.reindex(temp_disagg.index, method='ffill')

        # whenever we are before the maximum for the current day, use minimum value of current day for cosine function fitting
        # once we have passed the maximum value use the minimum for next day to ensure smooth transitions
        min_val = locdf.min_val_next.copy()
        min_val[min_val.index.hour < locdf.max_loc] = locdf.min_val_cur

        # whenever we are before the minimum for the current day, use maximum value of day before for cosine function fitting
        # once we have passed the minimum value use the maximum for the current day to ensure smooth transitions
        max_val = locdf.max_val_cur.copy()
        max_val[max_val.index.hour < locdf.min_loc] = locdf.max_val_before

        temp_disagg = pd.Series(index=min_val.index)

        if method in ('sine_min_max', 'sine'):
            # Cosine fitted through the daily minimum and maximum.
            delta_val = max_val - min_val
            v_trans = min_val + delta_val / 2.

            if mod_nighttime:
                # Three separate cosine segments around min_loc/max_loc.
                before_min = locdf.index.hour <= locdf.min_loc
                between_min_max = (locdf.index.hour > locdf.min_loc) & (locdf.index.hour < locdf.max_loc)
                after_max = locdf.index.hour >= locdf.max_loc
                temp_disagg[before_min] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (hours_per_day - locdf.max_loc + locdf.index.hour))
                temp_disagg[between_min_max] = v_trans + delta_val / 2. * np.cos(1.25 * np.pi + 0.75 * np.pi / (locdf.max_loc - locdf.min_loc) * (locdf.index.hour - locdf.min_loc))
                temp_disagg[after_max] = v_trans + delta_val / 2. * np.cos(np.pi / (hours_per_day - (locdf.max_loc - locdf.min_loc)) * (locdf.index.hour - locdf.max_loc))
            else:
                temp_disagg[:] = v_trans + (delta_val / 2.) * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))
        elif method == 'sine_mean':
            # Cosine centered on the daily mean, amplitude = half the
            # diurnal temperature range.
            dtr = locdf.max_val_cur - locdf.min_val_cur
            temp_disagg[:] = locdf.mean_val_cur + dtr / 2. * np.cos(2 * np.pi / hours_per_day * (locdf.index.hour - locdf.max_loc))

        polars = sun_times.daylength < daylength_thres
        if polars.sum() > 0:
            # during polar night, no diurnal variation of temperature is applied
            # instead the daily average calculated using tmin and tmax is applied
            polars_index_hourly = melodist.util.hourly_index(polars[polars].index)
            temp_disagg.loc[polars_index_hourly] = np.nan

            avg_before = (locdf_day.min_val_before + locdf_day.max_val_before) / 2.
            avg_cur = (locdf_day.min_val_cur + locdf_day.max_val_cur) / 2.

            # Warming days get min at 06h / max at 18h; cooling days the
            # reverse, so interpolation trends in the right direction.
            getting_warmers = polars & (avg_before <= avg_cur)
            getting_colders = polars & ~(avg_before <= avg_cur)

            getting_warmers_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_warmers[getting_warmers].index])
            getting_warmers_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_warmers[getting_warmers].index])
            temp_disagg[getting_warmers_min_loc] = locdf_day.min_val_cur[getting_warmers].values
            temp_disagg[getting_warmers_max_loc] = locdf_day.max_val_cur[getting_warmers].values

            getting_colders_min_loc = pd.DatetimeIndex([ts.replace(hour=min_loc_polar) for ts in getting_colders[getting_colders].index])
            getting_colders_max_loc = pd.DatetimeIndex([ts.replace(hour=max_loc_polar) for ts in getting_colders[getting_colders].index])
            temp_disagg[getting_colders_min_loc] = locdf_day.max_val_cur[getting_colders].values
            temp_disagg[getting_colders_max_loc] = locdf_day.min_val_cur[getting_colders].values

            temp_polars = temp_disagg.loc[polars_index_hourly].copy()
            transition_days = polars[polars.diff() == True].astype(int)  # 0 where transition from polar to "normal" mode, 1 where transition from normal to polar

            if len(transition_days) > 0:
                polar_to_normal_days = transition_days.index[transition_days == 0]
                normal_to_polar_days = transition_days.index[transition_days == 1] - pd.Timedelta(days=1)
                add_days = polar_to_normal_days.union(normal_to_polar_days)
                temp_polars = temp_polars.append(temp_disagg[melodist.util.hourly_index(add_days)]).sort_index()

                # Blank out the hours that belong to the "normal" regime on
                # transition days so interpolation only bridges polar hours.
                for day in polar_to_normal_days:
                    min_loc = int(locdf.loc[day].min_loc)
                    temp_polars[day.replace(hour=0):day.replace(hour=min_loc) - pd.Timedelta(hours=1)] = np.nan
                    temp_polars[day.replace(hour=min_loc)] = locdf.min_val_cur[day]

                for day in normal_to_polar_days:
                    max_loc = int(locdf.loc[day].max_loc)
                    temp_polars[day.replace(hour=max_loc) + pd.Timedelta(hours=1):day.replace(hour=23)] = np.nan

            # Linearly bridge the remaining gaps (at most one day's worth).
            temp_interp = temp_polars.interpolate(method='linear', limit=23)
            temp_disagg[temp_interp.index] = temp_interp
    elif method == 'mean_course_min_max':
        # Scale the normalized mean diurnal course between tmin and tmax.
        data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)

        df = pd.DataFrame(index=temp_disagg.index)
        df['normval'] = mean_course.unstack().loc[list(zip(df.index.month, df.index.hour))].values
        df['tmin'] = data_daily_as_hourly.tmin
        df['tmax'] = data_daily_as_hourly.tmax

        temp_disagg[:] = df.normval * (df.tmax - df.tmin) + df.tmin
    elif method == 'mean_course_mean':
        # Add the zero-mean diurnal course, scaled by the diurnal range,
        # onto the daily mean temperature.
        data_daily_as_hourly = data_daily.reindex(temp_disagg.index, method='ffill', limit=23)

        dtr = data_daily_as_hourly.tmax - data_daily_as_hourly.tmin
        mc = pd.Series(index=temp_disagg.index)
        mean_course_zeromean = mean_course - mean_course.mean()  # shift mean course so that the daily mean is 0
        mc[:] = mean_course_zeromean.unstack().loc[list(zip(temp_disagg.index.month, temp_disagg.index.hour))].values

        temp_disagg[:] = data_daily_as_hourly.temp + dtr * mc

    return temp_disagg
[ "def", "disaggregate_temperature", "(", "data_daily", ",", "method", "=", "'sine_min_max'", ",", "min_max_time", "=", "'fix'", ",", "mod_nighttime", "=", "False", ",", "max_delta", "=", "None", ",", "mean_course", "=", "None", ",", "sun_times", "=", "None", ")...
The disaggregation function for temperature Parameters ---- data_daily : daily data method : method to disaggregate min_max_time: "fix" - min/max temperature at fixed times 7h/14h, "sun_loc" - min/max calculated by sunrise/sunnoon + 2h, "sun_loc_shift" - min/max calculated by sunrise/sunnoon + monthly mean shift, max_delta: maximum monthly temperature shift as returned by get_shift_by_data() sun_times: times of sunrise/noon as returned by get_sun_times()
[ "The", "disaggregation", "function", "for", "temperature" ]
python
train
54.094444
wmayner/pyphi
pyphi/distribution.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/distribution.py#L81-L93
def purview(repertoire):
    """Return the purview of the repertoire.

    Args:
        repertoire (np.ndarray): A repertoire

    Returns:
        tuple[int]: The purview that the repertoire was computed over, i.e.
        the indices of all dimensions of size 2; ``None`` if no repertoire
        was given.
    """
    if repertoire is None:
        return None
    indices = []
    for axis, size in enumerate(repertoire.shape):
        if size == 2:
            indices.append(axis)
    return tuple(indices)
[ "def", "purview", "(", "repertoire", ")", ":", "if", "repertoire", "is", "None", ":", "return", "None", "return", "tuple", "(", "i", "for", "i", ",", "dim", "in", "enumerate", "(", "repertoire", ".", "shape", ")", "if", "dim", "==", "2", ")" ]
The purview of the repertoire. Args: repertoire (np.ndarray): A repertoire Returns: tuple[int]: The purview that the repertoire was computed over.
[ "The", "purview", "of", "the", "repertoire", "." ]
python
train
24.769231
ArchiveTeam/wpull
wpull/network/pool.py
https://github.com/ArchiveTeam/wpull/blob/ddf051aa3322479325ba20aa778cb2cb97606bf5/wpull/network/pool.py#L328-L333
def get_preferred(self, addr_1, addr_2):
    '''Return the preferred address for this pair, if cached.'''
    # Normalize the pair ordering so (a, b) and (b, a) share a cache key.
    key = (addr_2, addr_1) if addr_1 > addr_2 else (addr_1, addr_2)
    return self._cache.get(key)
[ "def", "get_preferred", "(", "self", ",", "addr_1", ",", "addr_2", ")", ":", "if", "addr_1", ">", "addr_2", ":", "addr_1", ",", "addr_2", "=", "addr_2", ",", "addr_1", "return", "self", ".", "_cache", ".", "get", "(", "(", "addr_1", ",", "addr_2", ")...
Return the preferred address.
[ "Return", "the", "preferred", "address", "." ]
python
train
33.5
djtaylor/python-lsbinit
lsbinit/__init__.py
https://github.com/djtaylor/python-lsbinit/blob/a41fc551226f61ac2bf1b8b0f3f5395db85e75a2/lsbinit/__init__.py#L51-L70
def _colorize(self, msg, color=None, encode=False): """ Colorize a string. """ # Valid colors colors = { 'red': '31', 'green': '32', 'yellow': '33' } # No color specified or unsupported color if not color or not color in colors: return msg # The colorized string if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
[ "def", "_colorize", "(", "self", ",", "msg", ",", "color", "=", "None", ",", "encode", "=", "False", ")", ":", "# Valid colors", "colors", "=", "{", "'red'", ":", "'31'", ",", "'green'", ":", "'32'", ",", "'yellow'", ":", "'33'", "}", "# No color speci...
Colorize a string.
[ "Colorize", "a", "string", "." ]
python
train
27.3
bukun/TorCMS
torcms/model/log_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/log_model.py#L20-L35
def add(data_dic):
    '''
    Insert a new access-log record and return its uid.
    '''
    record_uid = data_dic['uid']
    TabLog.create(
        uid=record_uid,
        current_url=data_dic['url'],
        refer_url=data_dic['refer'],
        user_id=data_dic['user_id'],
        time_create=data_dic['timein'],
        time_out=data_dic['timeOut'],
        time=data_dic['timeon'],
    )
    return record_uid
[ "def", "add", "(", "data_dic", ")", ":", "uid", "=", "data_dic", "[", "'uid'", "]", "TabLog", ".", "create", "(", "uid", "=", "uid", ",", "current_url", "=", "data_dic", "[", "'url'", "]", ",", "refer_url", "=", "data_dic", "[", "'refer'", "]", ",", ...
Insert new record.
[ "Insert", "new", "record", "." ]
python
train
25.1875
ff0000/scarlet
scarlet/assets/models.py
https://github.com/ff0000/scarlet/blob/6c37befd810916a2d7ffff2cdb2dab57bcb6d12e/scarlet/assets/models.py#L100-L114
def ensure_crops(self, *required_crops):
    """
    Make sure a crop exists for each crop in required_crops.
    Existing crops will not be changed.

    If celery is configured (settings.CELERY or the decorator flag) the
    task is run asynchronously; otherwise it runs inline.
    """
    if not self._can_crop():
        return

    if settings.CELERY or settings.USE_CELERY_DECORATOR:
        # Celery available: queue the task with a short delay.
        tasks.ensure_crops.apply_async(args=[self.pk] + list(required_crops),
                                       countdown=5)
    else:
        tasks.ensure_crops(None, *required_crops, asset=self)
[ "def", "ensure_crops", "(", "self", ",", "*", "required_crops", ")", ":", "if", "self", ".", "_can_crop", "(", ")", ":", "if", "settings", ".", "CELERY", "or", "settings", ".", "USE_CELERY_DECORATOR", ":", "# this means that we are using celery", "args", "=", ...
Make sure a crop exists for each crop in required_crops. Existing crops will not be changed. If settings.ASSET_CELERY is specified then the task will be run async
[ "Make", "sure", "a", "crop", "exists", "for", "each", "crop", "in", "required_crops", ".", "Existing", "crops", "will", "not", "be", "changed", "." ]
python
train
40.466667
pgmpy/pgmpy
pgmpy/readwrite/BIF.py
https://github.com/pgmpy/pgmpy/blob/9381a66aba3c3871d3ccd00672b148d17d63239e/pgmpy/readwrite/BIF.py#L87-L103
def get_probability_grammar(self):
    """
    Build and return the pyparsing grammars for the probability
    declaration line and for the CPD value body.
    """
    # Valid variable word: "var1 | var2 , var3" or "var1 var2 var3" or "var"
    var_word = Word(alphanums + '-' + '_') + Suppress(Optional("|")) + Suppress(Optional(","))
    var_word2 = Word(initChars=printables, excludeChars=[',', ')', ' ', '(']) + Suppress(Optional(","))
    # Valid number: 1.00, 1, 0.00, 9.8e-5 etc.
    number = Word(nums + '-' + '+' + 'e' + 'E' + '.') + Suppress(Optional(","))

    probability_expr = Suppress('probability') + Suppress('(') + OneOrMore(var_word) + Suppress(')')
    optional_expr = Suppress('(') + OneOrMore(var_word2) + Suppress(')')
    probab_attributes = optional_expr | Suppress('table')
    cpd_expr = probab_attributes + OneOrMore(number)

    return probability_expr, cpd_expr
[ "def", "get_probability_grammar", "(", "self", ")", ":", "# Creating valid word expression for probability, it is of the format", "# wor1 | var2 , var3 or var1 var2 var3 or simply var", "word_expr", "=", "Word", "(", "alphanums", "+", "'-'", "+", "'_'", ")", "+", "Suppress", ...
A method that returns probability grammar
[ "A", "method", "that", "returns", "probability", "grammar" ]
python
train
58.117647
gem/oq-engine
openquake/hazardlib/gsim/abrahamson_2015.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/hazardlib/gsim/abrahamson_2015.py#L134-L148
def _compute_magnitude_term(self, C, dc1, mag): """ Computes the magnitude scaling term given by equation (2) """ base = C['theta1'] + (self.CONSTS['theta4'] * dc1) dmag = self.CONSTS["C1"] + dc1 if mag > dmag: f_mag = (self.CONSTS['theta5'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) else: f_mag = (self.CONSTS['theta4'] * (mag - dmag)) +\ C['theta13'] * ((10. - mag) ** 2.) return base + f_mag
[ "def", "_compute_magnitude_term", "(", "self", ",", "C", ",", "dc1", ",", "mag", ")", ":", "base", "=", "C", "[", "'theta1'", "]", "+", "(", "self", ".", "CONSTS", "[", "'theta4'", "]", "*", "dc1", ")", "dmag", "=", "self", ".", "CONSTS", "[", "\...
Computes the magnitude scaling term given by equation (2)
[ "Computes", "the", "magnitude", "scaling", "term", "given", "by", "equation", "(", "2", ")" ]
python
train
34.266667
noxdafox/vminspect
vminspect/comparator.py
https://github.com/noxdafox/vminspect/blob/e685282564877e2d1950f1e09b292f4f4db1dbcd/vminspect/comparator.py#L345-L356
def compare_hives(fs0, fs1):
    """Compare all the Windows registry hive files of two filesystems,
    returning those which differ.

    Arguments:
        fs0, fs1: filesystem wrappers exposing ``fsroot`` and
            ``checksum(path)``.

    Returns:
        list: paths of the hives whose checksums differ between fs0 and fs1.
    """
    hive_paths = chain(registries_path(fs0.fsroot), user_registries(fs0, fs1))
    # A differing checksum means the hive content changed between the two
    # filesystems; the manual append loop is replaced by a comprehension.
    return [path for path in hive_paths
            if fs0.checksum(path) != fs1.checksum(path)]
[ "def", "compare_hives", "(", "fs0", ",", "fs1", ")", ":", "registries", "=", "[", "]", "for", "path", "in", "chain", "(", "registries_path", "(", "fs0", ".", "fsroot", ")", ",", "user_registries", "(", "fs0", ",", "fs1", ")", ")", ":", "if", "fs0", ...
Compares all the windows registry hive files returning those which differ.
[ "Compares", "all", "the", "windows", "registry", "hive", "files", "returning", "those", "which", "differ", "." ]
python
train
27