Dataset schema (column name, type, value range):

repo              string, length 7 to 55
path              string, length 4 to 223
url               string, length 87 to 315
code              string, length 75 to 104k
code_tokens       list
docstring         string, length 1 to 46.9k
docstring_tokens  list
language          string, 1 distinct value
partition         string, 3 distinct values
avg_line_len      float64, 7.91 to 980
fboender/ansible-cmdb
lib/mako/runtime.py
https://github.com/fboender/ansible-cmdb/blob/ebd960ac10684e8c9ec2b12751bba2c4c9504ab7/lib/mako/runtime.py#L732-L759
def _inherit_from(context, uri, calling_uri):
    """called by the _inherit method in template modules to set up
    the inheritance chain at the start of a template's execution."""
    if uri is None:
        return None
    template = _lookup_template(context, uri, calling_uri)
    self_ns = context['self']
    ih = self_ns
    while ih.inherits is not None:
        ih = ih.inherits
    lclcontext = context._locals({'next': ih})
    ih.inherits = TemplateNamespace("self:%s" % template.uri,
                                    lclcontext,
                                    template=template,
                                    populate_self=False)
    context._data['parent'] = lclcontext._data['local'] = ih.inherits
    callable_ = getattr(template.module, '_mako_inherit', None)
    if callable_ is not None:
        ret = callable_(template, lclcontext)
        if ret:
            return ret
    gen_ns = getattr(template.module, '_mako_generate_namespaces', None)
    if gen_ns is not None:
        gen_ns(context)
    return (template.callable_, lclcontext)
[ "def", "_inherit_from", "(", "context", ",", "uri", ",", "calling_uri", ")", ":", "if", "uri", "is", "None", ":", "return", "None", "template", "=", "_lookup_template", "(", "context", ",", "uri", ",", "calling_uri", ")", "self_ns", "=", "context", "[", ...
called by the _inherit method in template modules to set up the inheritance chain at the start of a template's execution.
[ "called", "by", "the", "_inherit", "method", "in", "template", "modules", "to", "set", "up", "the", "inheritance", "chain", "at", "the", "start", "of", "a", "template", "s", "execution", "." ]
python
train
37.142857
amelchio/pysonos
pysonos/core.py
https://github.com/amelchio/pysonos/blob/23527c445a00e198fbb94d44b92f7f99d139e325/pysonos/core.py#L705-L716
def mute(self):
    """bool: The speaker's mute state.

    True if muted, False otherwise.
    """
    response = self.renderingControl.GetMute([
        ('InstanceID', 0),
        ('Channel', 'Master')
    ])
    mute_state = response['CurrentMute']
    return bool(int(mute_state))
[ "def", "mute", "(", "self", ")", ":", "response", "=", "self", ".", "renderingControl", ".", "GetMute", "(", "[", "(", "'InstanceID'", ",", "0", ")", ",", "(", "'Channel'", ",", "'Master'", ")", "]", ")", "mute_state", "=", "response", "[", "'CurrentMu...
bool: The speaker's mute state. True if muted, False otherwise.
[ "bool", ":", "The", "speaker", "s", "mute", "state", "." ]
python
train
25.833333
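A minimal usage sketch for the record above, assuming `mute` is exposed as a property (as its docstring style suggests) and that pysonos keeps SoCo's `SoCo` entry point; the device IP is hypothetical:

from pysonos import SoCo  # assumed entry point; pysonos is a SoCo fork

speaker = SoCo('192.168.1.68')  # hypothetical device IP
if speaker.mute:                # triggers the GetMute call shown above
    print('Speaker is muted')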
bpython/curtsies
examples/tttplaybitboard.py
https://github.com/bpython/curtsies/blob/223e42b97fbf6c86b479ed4f0963a067333c5a63/examples/tttplaybitboard.py#L161-L165
def apply_move(grid, move):
    "Try to move: return a new grid, or None if illegal."
    p, q = grid
    bit = 1 << move
    return (q, p | bit) if 0 == (bit & (p | q)) else None
[ "def", "apply_move", "(", "grid", ",", "move", ")", ":", "p", ",", "q", "=", "grid", "bit", "=", "1", "<<", "move", "return", "(", "q", ",", "p", "|", "bit", ")", "if", "0", "==", "(", "bit", "&", "(", "p", "|", "q", ")", ")", "else", "No...
Try to move: return a new grid, or None if illegal.
[ "Try", "to", "move", ":", "return", "a", "new", "grid", "or", "None", "if", "illegal", "." ]
python
train
35
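A short worked example of the bitboard encoding above: each side of the grid is an int with one bit per square, and the swap of p and q in the return value is what alternates turns.

empty = (0, 0)
g1 = apply_move(empty, 4)   # mover takes square 4 -> (0, 1 << 4)
g2 = apply_move(g1, 4)      # square 4 already occupied -> None
assert g1 == (0, 16) and g2 is None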
OpenKMIP/PyKMIP
kmip/core/messages/payloads/activate.py
https://github.com/OpenKMIP/PyKMIP/blob/b51c5b044bd05f8c85a1d65d13a583a4d8fc1b0e/kmip/core/messages/payloads/activate.py#L95-L103
def validate(self):
    """
    Error check the attributes of the ActivateRequestPayload object.
    """
    if self.unique_identifier is not None:
        if not isinstance(self.unique_identifier,
                          attributes.UniqueIdentifier):
            msg = "invalid unique identifier"
            raise TypeError(msg)
[ "def", "validate", "(", "self", ")", ":", "if", "self", ".", "unique_identifier", "is", "not", "None", ":", "if", "not", "isinstance", "(", "self", ".", "unique_identifier", ",", "attributes", ".", "UniqueIdentifier", ")", ":", "msg", "=", "\"invalid unique ...
Error check the attributes of the ActivateRequestPayload object.
[ "Error", "check", "the", "attributes", "of", "the", "ActivateRequestPayload", "object", "." ]
python
test
39.555556
angr/angr
angr/analyses/congruency_check.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/analyses/congruency_check.py#L281-L342
def compare_states(self, sl, sr):
    """
    Compares two states for similarity.
    """
    joint_solver = claripy.Solver()

    # make sure the canonicalized constraints are the same
    n_map, n_counter, n_canon_constraint = claripy.And(*sr.solver.constraints).canonicalize()  # pylint:disable=no-member
    u_map, u_counter, u_canon_constraint = claripy.And(*sl.solver.constraints).canonicalize()  # pylint:disable=no-member
    n_canoner_constraint = sr.solver.simplify(n_canon_constraint)
    u_canoner_constraint = sl.solver.simplify(u_canon_constraint)
    joint_solver.add((n_canoner_constraint, u_canoner_constraint))
    if n_canoner_constraint is not u_canoner_constraint:
        self._report_incongruency("Different constraints!")
        return False

    # get the differences in registers and memory
    mem_diff = sr.memory.changed_bytes(sl.memory)
    reg_diff = sr.registers.changed_bytes(sl.registers)

    # this is only for unicorn
    if "UNICORN" in sl.options or "UNICORN" in sr.options:
        if sl.arch.name == "X86":
            reg_diff -= set(range(40, 52))    # ignore cc pseudoregisters
            reg_diff -= set(range(320, 324))  # some other VEX weirdness
            reg_diff -= set(range(340, 344))  # ip_at_syscall
        elif sl.arch.name == "AMD64":
            reg_diff -= set(range(144, 168))  # ignore cc pseudoregisters

    # make sure the differences in registers and memory are actually just
    # renamed versions of the same ASTs
    for diffs, (um, nm) in (
        (reg_diff, (sl.registers, sr.registers)),
        (mem_diff, (sl.memory, sr.memory)),
    ):
        for i in diffs:
            bn = nm.load(i, 1)
            bu = um.load(i, 1)
            bnc = bn.canonicalize(var_map=n_map, counter=n_counter)[-1]
            buc = bu.canonicalize(var_map=u_map, counter=u_counter)[-1]
            if bnc is not buc:
                self._report_incongruency("Different memory or registers (index %d, values %r and %r)!", i, bn, bu)
                return False

    # make sure the flags are the same
    if sl.arch.name in ("AMD64", "X86", "ARM", "ARMEL", "ARMHF", "AARCH64"):
        # pylint: disable=unused-variable
        n_bkp = sr.regs.cc_op, sr.regs.cc_dep1, sr.regs.cc_dep2, sr.regs.cc_ndep
        u_bkp = sl.regs.cc_op, sl.regs.cc_dep1, sl.regs.cc_dep2, sl.regs.cc_ndep
        if sl.arch.name in ('AMD64', 'X86'):
            n_flags = sr.regs.eflags.canonicalize(var_map=n_map, counter=n_counter)[-1]
            u_flags = sl.regs.eflags.canonicalize(var_map=u_map, counter=u_counter)[-1]
        else:
            n_flags = sr.regs.flags.canonicalize(var_map=n_map, counter=n_counter)[-1]
            u_flags = sl.regs.flags.canonicalize(var_map=u_map, counter=u_counter)[-1]
        if n_flags is not u_flags and sl.solver.simplify(n_flags) is not sr.solver.simplify(u_flags):
            self._report_incongruency("Different flags!")
            return False

    return True
[ "def", "compare_states", "(", "self", ",", "sl", ",", "sr", ")", ":", "joint_solver", "=", "claripy", ".", "Solver", "(", ")", "# make sure the canonicalized constraints are the same", "n_map", ",", "n_counter", ",", "n_canon_constraint", "=", "claripy", ".", "And...
Compares two states for similarity.
[ "Compares", "two", "states", "for", "similarity", "." ]
python
train
49.822581
refindlyllc/rets
rets/session.py
https://github.com/refindlyllc/rets/blob/c615dfc272cff0825fd3b50863c46afc3e33916f/rets/session.py#L197-L238
def _make_metadata_request(self, meta_id, metadata_type=None):
    """
    Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the
    format type. If that returns a DTD error then we change to the
    'STANDARD-XML' format and try again.

    :param meta_id: The name of the resource, class, or lookup to get metadata for
    :param metadata_type: The RETS metadata type
    :return: list
    """
    # If this metadata _request has already happened, return the saved result.
    key = '{0!s}:{1!s}'.format(metadata_type, meta_id)
    if key in self.metadata_responses and self.cache_metadata:
        response = self.metadata_responses[key]
    else:
        response = self._request(
            capability='GetMetadata',
            options={
                'query': {
                    'Type': metadata_type,
                    'ID': meta_id,
                    'Format': self.metadata_format
                }
            }
        )
        self.metadata_responses[key] = response

    if self.metadata_format == 'COMPACT-DECODED':
        parser = CompactMetadata()
    else:
        parser = StandardXMLetadata()

    try:
        return parser.parse(response=response, metadata_type=metadata_type)
    except RETSException as e:
        # Remove response from cache
        self.metadata_responses.pop(key, None)
        # If the server responds with an invalid parameter for COMPACT-DECODED, try STANDARD-XML
        if self.metadata_format != 'STANDARD-XML' and e.reply_code in ['20513', '20514']:
            self.metadata_responses.pop(key, None)
            self.metadata_format = 'STANDARD-XML'
            return self._make_metadata_request(meta_id=meta_id, metadata_type=metadata_type)
        raise RETSException(e.reply_text, e.reply_code)
[ "def", "_make_metadata_request", "(", "self", ",", "meta_id", ",", "metadata_type", "=", "None", ")", ":", "# If this metadata _request has already happened, returned the saved result.", "key", "=", "'{0!s}:{1!s}'", ".", "format", "(", "metadata_type", ",", "meta_id", ")"...
Get the Metadata. The Session initializes with 'COMPACT-DECODED' as the format type. If that returns a DTD error then we change to the 'STANDARD-XML' format and try again. :param meta_id: The name of the resource, class, or lookup to get metadata for :param metadata_type: The RETS metadata type :return: list
[ "Get", "the", "Metadata", ".", "The", "Session", "initializes", "with", "COMPACT", "-", "DECODED", "as", "the", "format", "type", ".", "If", "that", "returns", "a", "DTD", "error", "then", "we", "change", "to", "the", "STANDARD", "-", "XML", "format", "a...
python
train
44.97619
census-instrumentation/opencensus-python
opencensus/trace/tracers/context_tracer.py
https://github.com/census-instrumentation/opencensus-python/blob/992b223f7e34c5dcb65922b7d5c827e7a1351e7d/opencensus/trace/tracers/context_tracer.py#L149-L176
def get_span_datas(self, span):
    """Extracts a list of SpanData tuples from a span

    :rtype: list of opencensus.trace.span_data.SpanData
    :return list of SpanData tuples
    """
    span_datas = [
        span_data_module.SpanData(
            name=ss.name,
            context=self.span_context,
            span_id=ss.span_id,
            parent_span_id=ss.parent_span.span_id if ss.parent_span else None,
            attributes=ss.attributes,
            start_time=ss.start_time,
            end_time=ss.end_time,
            child_span_count=len(ss.children),
            stack_trace=ss.stack_trace,
            time_events=ss.time_events,
            links=ss.links,
            status=ss.status,
            same_process_as_parent_span=ss.same_process_as_parent_span,
            span_kind=ss.span_kind
        )
        for ss in span
    ]
    return span_datas
[ "def", "get_span_datas", "(", "self", ",", "span", ")", ":", "span_datas", "=", "[", "span_data_module", ".", "SpanData", "(", "name", "=", "ss", ".", "name", ",", "context", "=", "self", ".", "span_context", ",", "span_id", "=", "ss", ".", "span_id", ...
Extracts a list of SpanData tuples from a span :rtype: list of opencensus.trace.span_data.SpanData :return list of SpanData tuples
[ "Extracts", "a", "list", "of", "SpanData", "tuples", "from", "a", "span" ]
python
train
34.464286
google/prettytensor
prettytensor/tutorial/data_utils.py
https://github.com/google/prettytensor/blob/75daa0b11252590f548da5647addc0ea610c4c45/prettytensor/tutorial/data_utils.py#L92-L106
def mnist(training):
    """Downloads MNIST and loads it into numpy arrays."""
    if training:
        data_filename = 'train-images-idx3-ubyte.gz'
        labels_filename = 'train-labels-idx1-ubyte.gz'
        count = 60000
    else:
        data_filename = 't10k-images-idx3-ubyte.gz'
        labels_filename = 't10k-labels-idx1-ubyte.gz'
        count = 10000
    data_filename = maybe_download(MNIST_URL, data_filename)
    labels_filename = maybe_download(MNIST_URL, labels_filename)
    return (mnist_extract_data(data_filename, count),
            mnist_extract_labels(labels_filename, count))
[ "def", "mnist", "(", "training", ")", ":", "if", "training", ":", "data_filename", "=", "'train-images-idx3-ubyte.gz'", "labels_filename", "=", "'train-labels-idx1-ubyte.gz'", "count", "=", "60000", "else", ":", "data_filename", "=", "'t10k-images-idx3-ubyte.gz'", "labe...
Downloads MNIST and loads it into numpy arrays.
[ "Downloads", "MNIST", "and", "loads", "it", "into", "numpy", "arrays", "." ]
python
train
36.666667
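A hedged usage sketch for the loader above, assuming MNIST_URL and the module's helper functions are in scope:

train_images, train_labels = mnist(training=True)   # 60000 examples
test_images, test_labels = mnist(training=False)    # 10000 examples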
modin-project/modin
modin/backends/pandas/query_compiler.py
https://github.com/modin-project/modin/blob/5b77d242596560c646b8405340c9ce64acb183cb/modin/backends/pandas/query_compiler.py#L1251-L1263
def median(self, **kwargs):
    """Returns median of each column or row.

    Returns:
        A new QueryCompiler object containing the median of each column or row.
    """
    if self._is_transposed:
        kwargs["axis"] = kwargs.get("axis", 0) ^ 1
        return self.transpose().median(**kwargs)
    # Pandas default is 0 (though not mentioned in docs)
    axis = kwargs.get("axis", 0)
    func = self._build_mapreduce_func(pandas.DataFrame.median, **kwargs)
    return self._full_axis_reduce(axis, func)
[ "def", "median", "(", "self", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_is_transposed", ":", "kwargs", "[", "\"axis\"", "]", "=", "kwargs", ".", "get", "(", "\"axis\"", ",", "0", ")", "^", "1", "return", "self", ".", "transpose", "(",...
Returns median of each column or row. Returns: A new QueryCompiler object containing the median of each column or row.
[ "Returns", "median", "of", "each", "column", "or", "row", "." ]
python
train
41.769231
yyuu/botornado
boto/__init__.py
https://github.com/yyuu/botornado/blob/fffb056f5ff2324d1d5c1304014cfb1d899f602e/boto/__init__.py#L521-L536
def check_extensions(module_name, module_path):
    """
    This function checks for extensions to boto modules.
    It should be called in the __init__.py file of all boto modules.
    See: http://code.google.com/p/boto/wiki/ExtendModules for details.
    """
    option_name = '%s_extend' % module_name
    version = config.get('Boto', option_name, None)
    if version:
        dirname = module_path[0]
        path = os.path.join(dirname, version)
        if os.path.isdir(path):
            log.info('extending module %s with: %s' % (module_name, path))
            module_path.insert(0, path)
[ "def", "check_extensions", "(", "module_name", ",", "module_path", ")", ":", "option_name", "=", "'%s_extend'", "%", "module_name", "version", "=", "config", ".", "get", "(", "'Boto'", ",", "option_name", ",", "None", ")", "if", "version", ":", "dirname", "=...
This function checks for extensions to boto modules. It should be called in the __init__.py file of all boto modules. See: http://code.google.com/p/boto/wiki/ExtendModules for details.
[ "This", "function", "checks", "for", "extensions", "to", "boto", "modules", ".", "It", "should", "be", "called", "in", "the", "__init__", ".", "py", "file", "of", "all", "boto", "modules", ".", "See", ":", "http", ":", "//", "code", ".", "google", ".",...
python
train
36.875
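A sketch of the call site the docstring describes: at the bottom of a boto module's __init__.py, the module passes its own name and search path. The import location is an assumption based on the function living in boto/__init__.py.

from boto import check_extensions  # assumed import location

check_extensions(__name__, __path__)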
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/task_agent/task_agent_client.py#L829-L843
def delete_variable_group(self, project, group_id):
    """DeleteVariableGroup.

    [Preview API] Delete a variable group
    :param str project: Project ID or project name
    :param int group_id: Id of the variable group.
    """
    route_values = {}
    if project is not None:
        route_values['project'] = self._serialize.url('project', project, 'str')
    if group_id is not None:
        route_values['groupId'] = self._serialize.url('group_id', group_id, 'int')
    self._send(http_method='DELETE',
               location_id='f5b09dd5-9d54-45a1-8b5a-1c8287d634cc',
               version='5.1-preview.1',
               route_values=route_values)
[ "def", "delete_variable_group", "(", "self", ",", "project", ",", "group_id", ")", ":", "route_values", "=", "{", "}", "if", "project", "is", "not", "None", ":", "route_values", "[", "'project'", "]", "=", "self", ".", "_serialize", ".", "url", "(", "'pr...
DeleteVariableGroup. [Preview API] Delete a variable group :param str project: Project ID or project name :param int group_id: Id of the variable group.
[ "DeleteVariableGroup", ".", "[", "Preview", "API", "]", "Delete", "a", "variable", "group", ":", "param", "str", "project", ":", "Project", "ID", "or", "project", "name", ":", "param", "int", "group_id", ":", "Id", "of", "the", "variable", "group", "." ]
python
train
46.8
saltstack/salt
salt/transport/ipc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/transport/ipc.py#L616-L629
def close(self):
    '''
    Routines to handle any cleanup before the instance shuts down.
    Sockets and filehandles should be closed explicitly, to prevent
    leaks.
    '''
    if self._closing:
        return
    self._closing = True
    for stream in self.streams:
        stream.close()
    self.streams.clear()
    if hasattr(self.sock, 'close'):
        self.sock.close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closing", ":", "return", "self", ".", "_closing", "=", "True", "for", "stream", "in", "self", ".", "streams", ":", "stream", ".", "close", "(", ")", "self", ".", "streams", ".", "clear", "...
Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.
[ "Routines", "to", "handle", "any", "cleanup", "before", "the", "instance", "shuts", "down", ".", "Sockets", "and", "filehandles", "should", "be", "closed", "explicitly", "to", "prevent", "leaks", "." ]
python
train
30.071429
BerkeleyAutomation/autolab_core
autolab_core/rigid_transformations.py
https://github.com/BerkeleyAutomation/autolab_core/blob/8f3813f6401972868cc5e3981ba1b4382d4418d5/autolab_core/rigid_transformations.py#L560-L590
def delete_from_ros(self, service_name='rigid_transforms/rigid_transform_publisher', namespace=None):
    """Removes RigidTransform referencing from_frame and to_frame from ROS publisher.
    Note that this may not be this exact transform, but may be one that references
    the same frames (order doesn't matter).

    Also, note that it may take quite a while for the transform to disappear
    from rigid_transform_publisher's cache.

    Requires ROS rigid_transform_publisher service to be running. Assuming
    autolab_core is installed as a catkin package, this can be done with:
        roslaunch autolab_core rigid_transforms.launch

    Parameters
    ----------
    service_name : string, optional
        RigidTransformPublisher service to interface with. If the
        RigidTransformPublisher services are started through
        rigid_transforms.launch it will be called rigid_transform_publisher
    namespace : string, optional
        Namespace to prepend to transform_listener_service. If None,
        current namespace is prepended.

    Raises
    ------
    rospy.ServiceException
        If service call to rigid_transform_publisher fails
    """
    if namespace == None:
        service_name = rospy.get_namespace() + service_name
    else:
        service_name = namespace + service_name

    rospy.wait_for_service(service_name, timeout=10)

    publisher = rospy.ServiceProxy(service_name, RigidTransformPublisher)
    publisher(0, 0, 0, 0, 0, 0, 0, self.from_frame, self.to_frame, 'delete')
[ "def", "delete_from_ros", "(", "self", ",", "service_name", "=", "'rigid_transforms/rigid_transform_publisher'", ",", "namespace", "=", "None", ")", ":", "if", "namespace", "==", "None", ":", "service_name", "=", "rospy", ".", "get_namespace", "(", ")", "+", "se...
Removes RigidTransform referencing from_frame and to_frame from ROS publisher. Note that this may not be this exact transform, but may be one that references the same frames (order doesn't matter). Also, note that it may take quite a while for the transform to disappear from rigid_transform_publisher's cache. Requires ROS rigid_transform_publisher service to be running. Assuming autolab_core is installed as a catkin package, this can be done with: roslaunch autolab_core rigid_transforms.launch Parameters ---------- service_name : string, optional RigidTransformPublisher service to interface with. If the RigidTransformPublisher services are started through rigid_transforms.launch it will be called rigid_transform_publisher namespace : string, optional Namespace to prepend to transform_listener_service. If None, current namespace is prepended. Raises ------ rospy.ServiceException If service call to rigid_transform_publisher fails
[ "Removes", "RigidTransform", "referencing", "from_frame", "and", "to_frame", "from", "ROS", "publisher", ".", "Note", "that", "this", "may", "not", "be", "this", "exact", "transform", "but", "may", "that", "references", "the", "same", "frames", "(", "order", "...
python
train
51.612903
nok/sklearn-porter
sklearn_porter/estimator/classifier/GaussianNB/__init__.py
https://github.com/nok/sklearn-porter/blob/04673f768310bde31f9747a68a5e070592441ef2/sklearn_porter/estimator/classifier/GaussianNB/__init__.py#L61-L135
def export(self, class_name, method_name, export_data=False,
           export_dir='.', export_filename='data.json',
           export_append_checksum=False, **kwargs):
    """
    Port a trained estimator to the syntax of a chosen programming language.

    Parameters
    ----------
    :param class_name : string
        The name of the class in the returned result.
    :param method_name : string
        The name of the method in the returned result.
    :param export_data : bool, default: False
        Whether the model data should be saved or not.
    :param export_dir : string, default: '.' (current directory)
        The directory where the model data should be saved.
    :param export_filename : string, default: 'data.json'
        The filename of the exported model data.
    :param export_append_checksum : bool, default: False
        Whether to append the checksum to the filename or not.

    Returns
    -------
    :return : string
        The transpiled algorithm with the defined placeholders.
    """
    # Arguments:
    self.class_name = class_name
    self.method_name = method_name

    # Estimator:
    est = self.estimator

    self.n_features = len(est.sigma_[0])
    self.n_classes = len(est.classes_)

    temp_type = self.temp('type')
    temp_arr = self.temp('arr')
    temp_arr_ = self.temp('arr[]')
    temp_arr__ = self.temp('arr[][]')

    # Create class prior probabilities:
    priors = [temp_type.format(self.repr(c)) for c in est.class_prior_]
    priors = ', '.join(priors)
    self.priors = temp_arr_.format(type='double', name='priors',
                                   values=priors)

    # Create sigmas:
    sigmas = []
    for sigma in est.sigma_:
        tmp = [temp_type.format(self.repr(s)) for s in sigma]
        tmp = temp_arr.format(', '.join(tmp))
        sigmas.append(tmp)
    sigmas = ', '.join(sigmas)
    self.sigmas = temp_arr__.format(type='double', name='sigmas',
                                    values=sigmas)

    # Create thetas:
    thetas = []
    for theta in est.theta_:
        tmp = [temp_type.format(self.repr(t)) for t in theta]
        tmp = temp_arr.format(', '.join(tmp))
        thetas.append(tmp)
    thetas = ', '.join(thetas)
    self.thetas = temp_arr__.format(type='double', name='thetas',
                                    values=thetas)

    if self.target_method == 'predict':
        # Exported:
        if export_data and os.path.isdir(export_dir):
            self.export_data(export_dir, export_filename,
                             export_append_checksum)
            return self.predict('exported')
        # Separated:
        return self.predict('separated')
[ "def", "export", "(", "self", ",", "class_name", ",", "method_name", ",", "export_data", "=", "False", ",", "export_dir", "=", "'.'", ",", "export_filename", "=", "'data.json'", ",", "export_append_checksum", "=", "False", ",", "*", "*", "kwargs", ")", ":", ...
Port a trained estimator to the syntax of a chosen programming language. Parameters ---------- :param class_name : string The name of the class in the returned result. :param method_name : string The name of the method in the returned result. :param export_data : bool, default: False Whether the model data should be saved or not. :param export_dir : string, default: '.' (current directory) The directory where the model data should be saved. :param export_filename : string, default: 'data.json' The filename of the exported model data. :param export_append_checksum : bool, default: False Whether to append the checksum to the filename or not. Returns ------- :return : string The transpiled algorithm with the defined placeholders.
[ "Port", "a", "trained", "estimator", "to", "the", "syntax", "of", "a", "chosen", "programming", "language", "." ]
python
train
38.04
Demonware/jose
jose.py
https://github.com/Demonware/jose/blob/5835ec9c9fcab17eddea3c3169881ec12df552d4/jose.py#L315-L380
def legacy_decrypt(jwe, jwk, adata='', validate_claims=True,
                   expiry_seconds=None):
    """ Decrypts a deserialized :class:`~jose.JWE`

    :param jwe: An instance of :class:`~jose.JWE`
    :param jwk: A `dict` representing the JWK required to decrypt the content
                of the :class:`~jose.JWE`.
    :param adata: Arbitrary string data used during encryption for additional
                  authentication.
    :param validate_claims: A `bool` indicating whether or not the `exp`, `iat`
                            and `nbf` claims should be validated. Defaults to
                            `True`.
    :param expiry_seconds: An `int` containing the JWT expiry in seconds, used
                           when evaluating the `iat` claim. Defaults to `None`,
                           which disables `iat` claim validation.
    :rtype: :class:`~jose.JWT`
    :raises: :class:`~jose.Expired` if the JWT has expired
    :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid
    :raises: :class:`~jose.Error` if there is an error decrypting the JWE
    """
    protected_header, encrypted_key, iv, ciphertext, authentication_tag = map(
        b64decode_url, jwe)
    header = json_decode(protected_header)
    alg = header[HEADER_ALG]
    enc = header[HEADER_ENC]

    # decrypt cek
    encryption_key = _decrypt_key(encrypted_key, jwk, alg)

    # decrypt body
    ((_, decipher), _), ((hash_fn, _), mod) = JWA[enc]

    version = header.get(_TEMP_VER_KEY)
    if version:
        plaintext = decipher(ciphertext, encryption_key[-mod.digest_size/2:], iv)
        hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
                       encryption_key[:-mod.digest_size/2], mod=mod)
    else:
        plaintext = decipher(ciphertext, encryption_key[:-mod.digest_size], iv)
        hash = hash_fn(_jwe_hash_str(ciphertext, iv, adata, version),
                       encryption_key[-mod.digest_size:], mod=mod)

    if not const_compare(auth_tag(hash), authentication_tag):
        raise Error('Mismatched authentication tags')

    if HEADER_ZIP in header:
        try:
            (_, decompress) = COMPRESSION[header[HEADER_ZIP]]
        except KeyError:
            raise Error('Unsupported compression algorithm: {}'.format(
                header[HEADER_ZIP]))
        plaintext = decompress(plaintext)

    claims = json_decode(plaintext)
    try:
        del claims[_TEMP_VER_KEY]
    except KeyError:
        # expected when decrypting legacy tokens
        pass

    _validate(claims, validate_claims, expiry_seconds)

    return JWT(header, claims)
[ "def", "legacy_decrypt", "(", "jwe", ",", "jwk", ",", "adata", "=", "''", ",", "validate_claims", "=", "True", ",", "expiry_seconds", "=", "None", ")", ":", "protected_header", ",", "encrypted_key", ",", "iv", ",", "ciphertext", ",", "authentication_tag", "=...
Decrypts a deserialized :class:`~jose.JWE` :param jwe: An instance of :class:`~jose.JWE` :param jwk: A `dict` representing the JWK required to decrypt the content of the :class:`~jose.JWE`. :param adata: Arbitrary string data used during encryption for additional authentication. :param validate_claims: A `bool` indicating whether or not the `exp`, `iat` and `nbf` claims should be validated. Defaults to `True`. :param expiry_seconds: An `int` containing the JWT expiry in seconds, used when evaluating the `iat` claim. Defaults to `None`, which disables `iat` claim validation. :rtype: :class:`~jose.JWT` :raises: :class:`~jose.Expired` if the JWT has expired :raises: :class:`~jose.NotYetValid` if the JWT is not yet valid :raises: :class:`~jose.Error` if there is an error decrypting the JWE
[ "Decrypts", "a", "deserialized", ":", "class", ":", "~jose", ".", "JWE" ]
python
train
38.757576
saltstack/salt
salt/modules/rabbitmq.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/rabbitmq.py#L789-L805
def list_queues(runas=None, *args):
    '''
    Returns queue details of the / virtual host

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.list_queues messages consumers
    '''
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    cmd = [RABBITMQCTL, 'list_queues', '-q']
    cmd.extend(args)
    res = __salt__['cmd.run_all'](cmd,
                                  reset_system_locale=False,
                                  runas=runas,
                                  python_shell=False)
    _check_response(res)
    return _output_to_dict(res['stdout'])
[ "def", "list_queues", "(", "runas", "=", "None", ",", "*", "args", ")", ":", "if", "runas", "is", "None", "and", "not", "salt", ".", "utils", ".", "platform", ".", "is_windows", "(", ")", ":", "runas", "=", "salt", ".", "utils", ".", "user", ".", ...
Returns queue details of the / virtual host CLI Example: .. code-block:: bash salt '*' rabbitmq.list_queues messages consumers
[ "Returns", "queue", "details", "of", "the", "/", "virtual", "host" ]
python
train
30.764706
opennode/waldur-core
waldur_core/structure/metadata.py
https://github.com/opennode/waldur-core/blob/d6c17a9592bb6c49c33567542eef8d099605a46a/waldur_core/structure/metadata.py#L186-L230
def get_field_info(self, field, field_name):
    """
    Given an instance of a serializer field, return a dictionary
    of metadata about it.
    """
    field_info = OrderedDict()
    field_info['type'] = self.label_lookup[field]
    field_info['required'] = getattr(field, 'required', False)

    attrs = [
        'label', 'help_text', 'default_value', 'placeholder',
        'required', 'min_length', 'max_length',
        'min_value', 'max_value', 'many'
    ]

    if getattr(field, 'read_only', False):
        return None

    for attr in attrs:
        value = getattr(field, attr, None)
        if value is not None and value != '':
            field_info[attr] = force_text(value, strings_only=True)

    if 'label' not in field_info:
        field_info['label'] = field_name.replace('_', ' ').title()

    if hasattr(field, 'view_name'):
        list_view = field.view_name.replace('-detail', '-list')
        base_url = reverse(list_view, request=self.request)
        field_info['type'] = 'select'
        field_info['url'] = base_url
        if hasattr(field, 'query_params'):
            field_info['url'] += '?%s' % urlencode(field.query_params)
        field_info['value_field'] = getattr(field, 'value_field', 'url')
        field_info['display_name_field'] = getattr(field, 'display_name_field', 'display_name')

    if hasattr(field, 'choices') and not hasattr(field, 'queryset'):
        field_info['choices'] = [
            {
                'value': choice_value,
                'display_name': force_text(choice_name, strings_only=True)
            }
            for choice_value, choice_name in field.choices.items()
        ]

    return field_info
[ "def", "get_field_info", "(", "self", ",", "field", ",", "field_name", ")", ":", "field_info", "=", "OrderedDict", "(", ")", "field_info", "[", "'type'", "]", "=", "self", ".", "label_lookup", "[", "field", "]", "field_info", "[", "'required'", "]", "=", ...
Given an instance of a serializer field, return a dictionary of metadata about it.
[ "Given", "an", "instance", "of", "a", "serializer", "field", "return", "a", "dictionary", "of", "metadata", "about", "it", "." ]
python
train
39.444444
IRC-SPHERE/HyperStream
hyperstream/stream/stream.py
https://github.com/IRC-SPHERE/HyperStream/blob/98478f4d31ed938f4aa7c958ed0d4c3ffcb2e780/hyperstream/stream/stream.py#L246-L256
def calculated_intervals(self, intervals):
    """
    Updates the calculated intervals in the database. Performs an upsert

    :param intervals: The calculated intervals
    :return: None
    """
    logging.debug("set calculated intervals")
    self.mongo_model.set_calculated_intervals(intervals)
    self.save()
    self._calculated_intervals = TimeIntervals(intervals)
[ "def", "calculated_intervals", "(", "self", ",", "intervals", ")", ":", "logging", ".", "debug", "(", "\"set calculated intervals\"", ")", "self", ".", "mongo_model", ".", "set_calculated_intervals", "(", "intervals", ")", "self", ".", "save", "(", ")", "self", ...
Updates the calculated intervals in the database. Performs an upsert :param intervals: The calculated intervals :return: None
[ "Updates", "the", "calculated", "intervals", "in", "the", "database", ".", "Performs", "an", "upsert" ]
python
train
36.363636
bukun/TorCMS
torcms/model/post_model.py
https://github.com/bukun/TorCMS/blob/6567c7fe2604a1d646d4570c017840958630ed2b/torcms/model/post_model.py#L272-L304
def query_recent(num=8, **kwargs):
    '''
    query recent posts.
    '''
    order_by_create = kwargs.get('order_by_create', False)
    kind = kwargs.get('kind', None)
    if order_by_create:
        if kind:
            recent_recs = TabPost.select().where(
                (TabPost.kind == kind) & (TabPost.valid == 1)
            ).order_by(
                TabPost.time_create.desc()
            ).limit(num)
        else:
            recent_recs = TabPost.select().where(
                TabPost.valid == 1
            ).order_by(
                TabPost.time_create.desc()
            ).limit(num)
    else:
        if kind:
            recent_recs = TabPost.select().where(
                (TabPost.kind == kind) & (TabPost.valid == 1)
            ).order_by(
                TabPost.time_update.desc()
            ).limit(num)
        else:
            recent_recs = TabPost.select().where(
                TabPost.valid == 1
            ).order_by(
                TabPost.time_update.desc()
            ).limit(num)
    return recent_recs
[ "def", "query_recent", "(", "num", "=", "8", ",", "*", "*", "kwargs", ")", ":", "order_by_create", "=", "kwargs", ".", "get", "(", "'order_by_create'", ",", "False", ")", "kind", "=", "kwargs", ".", "get", "(", "'kind'", ",", "None", ")", "if", "orde...
query recent posts.
[ "query", "recent", "posts", "." ]
python
train
34.727273
fabioz/PyDev.Debugger
_pydevd_bundle/pydevd_comm.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/_pydevd_bundle/pydevd_comm.py#L1047-L1060
def internal_get_description(dbg, seq, thread_id, frame_id, expression):
    ''' Fetch the variable description stub from the debug console
    '''
    try:
        frame = dbg.find_frame(thread_id, frame_id)
        description = pydevd_console.get_description(frame, thread_id, frame_id, expression)
        description = pydevd_xml.make_valid_xml_value(quote(description, '/>_= \t'))
        description_xml = '<xml><var name="" type="" value="%s"/></xml>' % description
        cmd = dbg.cmd_factory.make_get_description_message(seq, description_xml)
        dbg.writer.add_command(cmd)
    except:
        exc = get_exception_traceback_str()
        cmd = dbg.cmd_factory.make_error_message(seq, "Error in fetching description" + exc)
        dbg.writer.add_command(cmd)
[ "def", "internal_get_description", "(", "dbg", ",", "seq", ",", "thread_id", ",", "frame_id", ",", "expression", ")", ":", "try", ":", "frame", "=", "dbg", ".", "find_frame", "(", "thread_id", ",", "frame_id", ")", "description", "=", "pydevd_console", ".", ...
Fetch the variable description stub from the debug console
[ "Fetch", "the", "variable", "description", "stub", "from", "the", "debug", "console" ]
python
train
54.428571
spyder-ide/spyder
spyder/widgets/tabs.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/widgets/tabs.py#L123-L132
def edit_finished(self):
    """On clean exit, update tab name."""
    # Hides editor
    self.hide()
    if isinstance(self.tab_index, int) and self.tab_index >= 0:
        # We are editing a valid tab, update name
        tab_text = to_text_string(self.text())
        self.main.setTabText(self.tab_index, tab_text)
        self.main.sig_change_name.emit(tab_text)
[ "def", "edit_finished", "(", "self", ")", ":", "# Hides editor\r", "self", ".", "hide", "(", ")", "if", "isinstance", "(", "self", ".", "tab_index", ",", "int", ")", "and", "self", ".", "tab_index", ">=", "0", ":", "# We are editing a valid tab, update name\r"...
On clean exit, update tab name.
[ "On", "clean", "exit", "update", "tab", "name", "." ]
python
train
39.9
pandas-dev/pandas
doc/source/conf.py
https://github.com/pandas-dev/pandas/blob/9feb3ad92cc0397a04b665803a49299ee7aa1037/doc/source/conf.py#L688-L724
def process_class_docstrings(app, what, name, obj, options, lines):
    """
    For those classes for which we use ::

        :template: autosummary/class_without_autosummary.rst

    the documented attributes/methods have to be listed in the class
    docstring. However, if one of those lists is empty, we use 'None',
    which then generates warnings in sphinx / ugly html output.
    This "autodoc-process-docstring" event connector removes that part
    from the processed docstring.
    """
    if what == "class":
        joined = '\n'.join(lines)

        templates = [
            """.. rubric:: Attributes

.. autosummary::
   :toctree:

   None
""",
            """.. rubric:: Methods

.. autosummary::
   :toctree:

   None
"""
        ]

        for template in templates:
            if template in joined:
                joined = joined.replace(template, '')
        lines[:] = joined.split('\n')
[ "def", "process_class_docstrings", "(", "app", ",", "what", ",", "name", ",", "obj", ",", "options", ",", "lines", ")", ":", "if", "what", "==", "\"class\"", ":", "joined", "=", "'\\n'", ".", "join", "(", "lines", ")", "templates", "=", "[", "\"\"\".. ...
For those classes for which we use :: :template: autosummary/class_without_autosummary.rst the documented attributes/methods have to be listed in the class docstring. However, if one of those lists is empty, we use 'None', which then generates warnings in sphinx / ugly html output. This "autodoc-process-docstring" event connector removes that part from the processed docstring.
[ "For", "those", "classes", "for", "which", "we", "use", "::" ]
python
train
23.621622
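For context, an "autodoc-process-docstring" event connector like the one above is registered through Sphinx's standard extension hook; a minimal sketch of the wiring (pandas' conf.py does this in its own setup() function):

def setup(app):
    # run process_class_docstrings on every docstring autodoc processes
    app.connect("autodoc-process-docstring", process_class_docstrings)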
pytroll/trollimage
trollimage/xrimage.py
https://github.com/pytroll/trollimage/blob/d35a7665ad475ff230e457085523e21f2cd3f454/trollimage/xrimage.py#L447-L463
def _add_alpha(self, data, alpha=None):
    """Create an alpha channel and concatenate it to the provided data.

    If ``data`` is an integer type then the alpha band will be scaled
    to use the smallest (min) value as fully transparent and the
    largest (max) value as fully opaque. For float types the alpha
    band spans 0 to 1.

    """
    null_mask = alpha if alpha is not None else self._create_alpha(data)
    # if we are using integer data, then alpha needs to be min-int to max-int
    # otherwise for floats we want 0 to 1
    if np.issubdtype(data.dtype, np.integer):
        # xarray sometimes upcasts this calculation, so cast again
        null_mask = self._scale_to_dtype(null_mask, data.dtype).astype(data.dtype)
    data = xr.concat([data, null_mask], dim="bands")
    return data
[ "def", "_add_alpha", "(", "self", ",", "data", ",", "alpha", "=", "None", ")", ":", "null_mask", "=", "alpha", "if", "alpha", "is", "not", "None", "else", "self", ".", "_create_alpha", "(", "data", ")", "# if we are using integer data, then alpha needs to be min...
Create an alpha channel and concatenate it to the provided data. If ``data`` is an integer type then the alpha band will be scaled to use the smallest (min) value as fully transparent and the largest (max) value as fully opaque. For float types the alpha band spans 0 to 1.
[ "Create", "an", "alpha", "channel", "and", "concatenate", "it", "to", "the", "provided", "data", "." ]
python
train
49.647059
wakatime/wakatime
wakatime/stats.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/stats.py#L136-L162
def guess_lexer_using_modeline(text):
    """Guess lexer for given text using Vim modeline.

    Returns a tuple of (lexer, accuracy).
    """
    lexer, accuracy = None, None
    file_type = None
    try:
        file_type = get_filetype_from_buffer(text)
    except:  # pragma: nocover
        log.traceback(logging.DEBUG)

    if file_type is not None:
        try:
            lexer = get_lexer_by_name(file_type)
        except ClassNotFound:
            log.traceback(logging.DEBUG)

    if lexer is not None:
        try:
            accuracy = lexer.analyse_text(text)
        except:  # pragma: nocover
            log.traceback(logging.DEBUG)

    return lexer, accuracy
[ "def", "guess_lexer_using_modeline", "(", "text", ")", ":", "lexer", ",", "accuracy", "=", "None", ",", "None", "file_type", "=", "None", "try", ":", "file_type", "=", "get_filetype_from_buffer", "(", "text", ")", "except", ":", "# pragma: nocover", "log", "."...
Guess lexer for given text using Vim modeline. Returns a tuple of (lexer, accuracy).
[ "Guess", "lexer", "for", "given", "text", "using", "Vim", "modeline", "." ]
python
train
24.296296
has2k1/plotnine
plotnine/geoms/geom_path.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/geoms/geom_path.py#L276-L338
def get_paths(self, x1, y1, x2, y2, panel_params, coord, ax):
    """
    Compute paths that create the arrow heads

    Parameters
    ----------
    x1, y1, x2, y2 : array_like
        List of points that define the tails of the arrows.
        The arrow heads will be at x1, y1. If you need them
        at x2, y2 reverse the input.

    Returns
    -------
    out : list of Path
        Paths that create arrow heads
    """
    Path = mpath.Path

    # Create reusable lists of vertices and codes
    # arrowhead path has 3 vertices (Nones),
    # plus dummy vertex for the STOP code
    verts = [None, None, None, (0, 0)]
    # codes list remains the same after initialization
    codes = [Path.MOVETO, Path.LINETO, Path.LINETO, Path.STOP]
    # Slices into the vertices list
    slc = slice(0, 3)

    # We need the plot dimensions so that we can
    # compute scaling factors
    fig = ax.get_figure()
    width, height = fig.get_size_inches()
    ranges = coord.range(panel_params)
    width_ = np.ptp(ranges.x)
    height_ = np.ptp(ranges.y)

    # scaling factors to prevent skewed arrowheads
    lx = self.length * width_/width
    ly = self.length * height_/height

    # angle in radians
    a = self.angle * np.pi / 180

    # direction of arrow head
    xdiff, ydiff = x2 - x1, y2 - y1
    rotations = np.arctan2(ydiff/ly, xdiff/lx)

    # Arrow head vertices
    v1x = x1 + lx * np.cos(rotations + a)
    v1y = y1 + ly * np.sin(rotations + a)
    v2x = x1 + lx * np.cos(rotations - a)
    v2y = y1 + ly * np.sin(rotations - a)

    # create a path for each arrow head
    paths = []
    for t in zip(v1x, v1y, x1, y1, v2x, v2y):
        verts[slc] = [t[:2], t[2:4], t[4:]]
        paths.append(Path(verts, codes))

    return paths
[ "def", "get_paths", "(", "self", ",", "x1", ",", "y1", ",", "x2", ",", "y2", ",", "panel_params", ",", "coord", ",", "ax", ")", ":", "Path", "=", "mpath", ".", "Path", "# Create reusable lists of vertices and codes", "# arrowhead path has 3 vertices (Nones),", "...
Compute paths that create the arrow heads Parameters ---------- x1, y1, x2, y2 : array_like List of points that define the tails of the arrows. The arrow heads will be at x1, y1. If you need them at x2, y2 reverse the input. Returns ------- out : list of Path Paths that create arrow heads
[ "Compute", "paths", "that", "create", "the", "arrow", "heads" ]
python
train
30.396825
tsroten/pynlpir
pynlpir/cli.py
https://github.com/tsroten/pynlpir/blob/8d5e994796a2b5d513f7db8d76d7d24a85d531b1/pynlpir/cli.py#L34-L68
def update_license_file(data_dir):
    """Update NLPIR license file if it is out-of-date or missing.

    :param str data_dir: The NLPIR data directory that houses the license.
    :returns bool: Whether or not an update occurred.
    """
    license_file = os.path.join(data_dir, LICENSE_FILENAME)
    temp_dir = tempfile.mkdtemp()
    gh_license_filename = os.path.join(temp_dir, LICENSE_FILENAME)
    try:
        _, headers = urlretrieve(LICENSE_URL, gh_license_filename)
    except IOError as e:
        # Python 2 uses the unhelpful IOError for this. Re-raise as the more
        # appropriate URLError.
        raise URLError(e.strerror)

    with open(gh_license_filename, 'rb') as f:
        github_license = f.read()
    try:
        with open(license_file, 'rb') as f:
            current_license = f.read()
    except (IOError, OSError):
        current_license = b''

    github_digest = hashlib.sha256(github_license).hexdigest()
    current_digest = hashlib.sha256(current_license).hexdigest()

    if github_digest == current_digest:
        return False

    shutil.copyfile(gh_license_filename, license_file)
    shutil.rmtree(temp_dir, ignore_errors=True)
    return True
[ "def", "update_license_file", "(", "data_dir", ")", ":", "license_file", "=", "os", ".", "path", ".", "join", "(", "data_dir", ",", "LICENSE_FILENAME", ")", "temp_dir", "=", "tempfile", ".", "mkdtemp", "(", ")", "gh_license_filename", "=", "os", ".", "path",...
Update NLPIR license file if it is out-of-date or missing. :param str data_dir: The NLPIR data directory that houses the license. :returns bool: Whether or not an update occurred.
[ "Update", "NLPIR", "license", "file", "if", "it", "is", "out", "-", "of", "-", "date", "or", "missing", "." ]
python
train
33.085714
hMatoba/Piexif
piexif/_common.py
https://github.com/hMatoba/Piexif/blob/afd0d232cf05cf530423f4b2a82ab291f150601a/piexif/_common.py#L6-L27
def split_into_segments(data):
    """Slices JPEG meta data into a list from JPEG binary data.
    """
    if data[0:2] != b"\xff\xd8":
        raise InvalidImageDataError("Given data isn't JPEG.")

    head = 2
    segments = [b"\xff\xd8"]
    while 1:
        if data[head: head + 2] == b"\xff\xda":
            segments.append(data[head:])
            break
        else:
            length = struct.unpack(">H", data[head + 2: head + 4])[0]
            endPoint = head + length + 2
            seg = data[head: endPoint]
            segments.append(seg)
            head = endPoint

        if (head >= len(data)):
            raise InvalidImageDataError("Wrong JPEG data.")
    return segments
[ "def", "split_into_segments", "(", "data", ")", ":", "if", "data", "[", "0", ":", "2", "]", "!=", "b\"\\xff\\xd8\"", ":", "raise", "InvalidImageDataError", "(", "\"Given data isn't JPEG.\"", ")", "head", "=", "2", "segments", "=", "[", "b\"\\xff\\xd8\"", "]", ...
Slices JPEG meta data into a list from JPEG binary data.
[ "Slices", "JPEG", "meta", "data", "into", "a", "list", "from", "JPEG", "binary", "data", "." ]
python
train
30.772727
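A usage sketch for the splitter above; note that _common is a private piexif module, so the import path is illustrative rather than public API, and the filename is hypothetical:

from piexif._common import split_into_segments  # private module

with open('photo.jpg', 'rb') as f:
    segments = split_into_segments(f.read())
# segments[0] is the SOI marker b"\xff\xd8"; the final element begins
# at the SOS marker and runs to the end of the file.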
openstax/cnx-publishing
cnxpublishing/views/user_actions.py
https://github.com/openstax/cnx-publishing/blob/f55b4a2c45d8618737288f1b74b4139d5ac74154/cnxpublishing/views/user_actions.py#L235-L257
def get_acl(request):
    """Returns the ACL for the given content identified by ``uuid``."""
    uuid_ = request.matchdict['uuid']
    with db_connect() as db_conn:
        with db_conn.cursor() as cursor:
            cursor.execute("""\
SELECT TRUE FROM document_controls WHERE uuid = %s""", (uuid_,))
            try:
                # Check that it exists
                cursor.fetchone()[0]
            except TypeError:
                raise httpexceptions.HTTPNotFound()
            cursor.execute("""\
SELECT row_to_json(combined_rows) FROM (
  SELECT uuid, user_id AS uid, permission
  FROM document_acl AS acl
  WHERE uuid = %s
  ORDER BY user_id ASC, permission ASC
) as combined_rows""", (uuid_,))
            acl = [r[0] for r in cursor.fetchall()]
    return acl
[ "def", "get_acl", "(", "request", ")", ":", "uuid_", "=", "request", ".", "matchdict", "[", "'uuid'", "]", "with", "db_connect", "(", ")", "as", "db_conn", ":", "with", "db_conn", ".", "cursor", "(", ")", "as", "cursor", ":", "cursor", ".", "execute", ...
Returns the ACL for the given content identified by ``uuid``.
[ "Returns", "the", "ACL", "for", "the", "given", "content", "identified", "by", "uuid", "." ]
python
valid
32.565217
Parallels/artifactory
artifactory.py
https://github.com/Parallels/artifactory/blob/09ddcc4ae15095eec2347d39774c3f8aca6c4654/artifactory.py#L420-L426
def rest_put(self, url, params=None, headers=None, auth=None, verify=True, cert=None):
    """
    Perform a PUT request to url with optional authentication
    """
    res = requests.put(url, params=params, headers=headers, auth=auth,
                       verify=verify, cert=cert)
    return res.text, res.status_code
[ "def", "rest_put", "(", "self", ",", "url", ",", "params", "=", "None", ",", "headers", "=", "None", ",", "auth", "=", "None", ",", "verify", "=", "True", ",", "cert", "=", "None", ")", ":", "res", "=", "requests", ".", "put", "(", "url", ",", ...
Perform a PUT request to url with optional authentication
[ "Perform", "a", "PUT", "request", "to", "url", "with", "optional", "authentication" ]
python
train
48.428571
csparpa/pyowm
pyowm/weatherapi25/owm25.py
https://github.com/csparpa/pyowm/blob/cdd59eb72f32f7238624ceef9b2e2329a5ebd472/pyowm/weatherapi25/owm25.py#L338-L378
def weather_at_places(self, pattern, searchtype, limit=None):
    """
    Queries the OWM Weather API for the currently observed weather in all the
    locations whose name is matching the specified text search parameters.
    A twofold search can be issued: *'accurate'* (exact matching) and
    *'like'* (matches names that are similar to the supplied pattern).

    :param pattern: the string pattern (not a regex) to be searched for the
        toponym
    :type pattern: str
    :param searchtype: the search mode to be used, must be *'accurate'* for
        an exact matching or *'like'* for a likelihood matching
    :type: searchtype: str
    :param limit: the maximum number of *Observation* items in the returned
        list (default is ``None``, which stands for any number of items)
    :param limit: int or ``None``
    :returns: a list of *Observation* objects or ``None`` if no weather
        data is available
    :raises: *ParseResponseException* when OWM Weather API responses' data
        cannot be parsed, *APICallException* when OWM Weather API can not be
        reached, *ValueError* when bad value is supplied for the search type
        or the maximum number of items retrieved
    """
    assert isinstance(pattern, str), "'pattern' must be a str"
    assert isinstance(searchtype, str), "'searchtype' must be a str"
    if searchtype != "accurate" and searchtype != "like":
        raise ValueError("'searchtype' value must be 'accurate' or 'like'")
    if limit is not None:
        assert isinstance(limit, int), "'limit' must be an int or None"
        if limit < 1:
            raise ValueError("'limit' must be None or greater than zero")
    params = {'q': pattern, 'type': searchtype, 'lang': self._language}
    if limit is not None:
        # fix for OWM 2.5 API bug!
        params['cnt'] = limit - 1
    uri = http_client.HttpClient.to_url(FIND_OBSERVATIONS_URL,
                                        self._API_key,
                                        self._subscription_type,
                                        self._use_ssl)
    _, json_data = self._wapi.cacheable_get_json(uri, params=params)
    return self._parsers['observation_list'].parse_JSON(json_data)
[ "def", "weather_at_places", "(", "self", ",", "pattern", ",", "searchtype", ",", "limit", "=", "None", ")", ":", "assert", "isinstance", "(", "pattern", ",", "str", ")", ",", "\"'pattern' must be a str\"", "assert", "isinstance", "(", "searchtype", ",", "str",...
Queries the OWM Weather API for the currently observed weather in all the locations whose name is matching the specified text search parameters. A twofold search can be issued: *'accurate'* (exact matching) and *'like'* (matches names that are similar to the supplied pattern). :param pattern: the string pattern (not a regex) to be searched for the toponym :type pattern: str :param searchtype: the search mode to be used, must be *'accurate'* for an exact matching or *'like'* for a likelihood matching :type: searchtype: str :param limit: the maximum number of *Observation* items in the returned list (default is ``None``, which stands for any number of items) :param limit: int or ``None`` :returns: a list of *Observation* objects or ``None`` if no weather data is available :raises: *ParseResponseException* when OWM Weather API responses' data cannot be parsed, *APICallException* when OWM Weather API can not be reached, *ValueError* when bad value is supplied for the search type or the maximum number of items retrieved
[ "Queries", "the", "OWM", "Weather", "API", "for", "the", "currently", "observed", "weather", "in", "all", "the", "locations", "whose", "name", "is", "matching", "the", "specified", "text", "search", "parameters", ".", "A", "twofold", "search", "can", "be", "...
python
train
56.780488
frmdstryr/enamlx
enamlx/core/block.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/core/block.py#L35-L48
def _observe_block(self, change):
    """ A change handler for the 'objects' list of the Include.

    If the object is initialized, objects which are removed will be
    unparented and objects which are added will be reparented. Old
    objects will be destroyed if the 'destroy_old' flag is True.
    """
    if self.is_initialized:
        if change['type'] == 'update':
            old_block = change['oldvalue']
            old_block.parent.remove_children(old_block, self.children)
            new_block = change['value']
            new_block.parent.insert_children(new_block, self.children)
[ "def", "_observe_block", "(", "self", ",", "change", ")", ":", "if", "self", ".", "is_initialized", ":", "if", "change", "[", "'type'", "]", "==", "'update'", ":", "old_block", "=", "change", "[", "'oldvalue'", "]", "old_block", ".", "parent", ".", "remo...
A change handler for the 'objects' list of the Include. If the object is initialized, objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True.
[ "A", "change", "handler", "for", "the", "objects", "list", "of", "the", "Include", "." ]
python
train
44.857143
twilio/twilio-python
twilio/rest/api/v2010/account/conference/__init__.py
https://github.com/twilio/twilio-python/blob/c867895f55dcc29f522e6e8b8868d0d18483132f/twilio/rest/api/v2010/account/conference/__init__.py#L307-L332
def update(self, status=values.unset, announce_url=values.unset,
           announce_method=values.unset):
    """
    Update the ConferenceInstance

    :param ConferenceInstance.UpdateStatus status: The new status of the resource
    :param unicode announce_url: The URL we should call to announce something into the conference
    :param unicode announce_method: The HTTP method used to call announce_url

    :returns: Updated ConferenceInstance
    :rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
    """
    data = values.of({
        'Status': status,
        'AnnounceUrl': announce_url,
        'AnnounceMethod': announce_method,
    })

    payload = self._version.update(
        'POST',
        self._uri,
        data=data,
    )

    return ConferenceInstance(
        self._version,
        payload,
        account_sid=self._solution['account_sid'],
        sid=self._solution['sid'],
    )
[ "def", "update", "(", "self", ",", "status", "=", "values", ".", "unset", ",", "announce_url", "=", "values", ".", "unset", ",", "announce_method", "=", "values", ".", "unset", ")", ":", "data", "=", "values", ".", "of", "(", "{", "'Status'", ":", "s...
Update the ConferenceInstance :param ConferenceInstance.UpdateStatus status: The new status of the resource :param unicode announce_url: The URL we should call to announce something into the conference :param unicode announce_method: The HTTP method used to call announce_url :returns: Updated ConferenceInstance :rtype: twilio.rest.api.v2010.account.conference.ConferenceInstance
[ "Update", "the", "ConferenceInstance" ]
python
train
36.730769
UDST/orca
orca/orca.py
https://github.com/UDST/orca/blob/07b34aeef13cc87c966b2e30cbe7e76cc9d3622c/orca/orca.py#L1263-L1304
def _memoize_function(f, name, cache_scope=_CS_FOREVER):
    """
    Wraps a function for memoization and ties its cache into the
    Orca caching system.

    Parameters
    ----------
    f : function
    name : str
        Name of injectable.
    cache_scope : {'step', 'iteration', 'forever'}, optional
        Scope for which to cache data. Default is to cache forever
        (or until manually cleared). 'iteration' caches data for each
        complete iteration of the pipeline, 'step' caches data for
        a single step of the pipeline.

    """
    cache = {}

    @wraps(f)
    def wrapper(*args, **kwargs):
        try:
            cache_key = (
                args or None, frozenset(kwargs.items()) if kwargs else None)
            in_cache = cache_key in cache
        except TypeError:
            raise TypeError(
                'function arguments must be hashable for memoization')

        if _CACHING and in_cache:
            return cache[cache_key]
        else:
            result = f(*args, **kwargs)
            cache[cache_key] = result
            return result

    wrapper.__wrapped__ = f
    wrapper.cache = cache
    wrapper.clear_cached = lambda: cache.clear()
    _MEMOIZED[name] = CacheItem(name, wrapper, cache_scope)

    return wrapper
[ "def", "_memoize_function", "(", "f", ",", "name", ",", "cache_scope", "=", "_CS_FOREVER", ")", ":", "cache", "=", "{", "}", "@", "wraps", "(", "f", ")", "def", "wrapper", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "try", ":", "cache_key"...
Wraps a function for memoization and ties its cache into the Orca caching system. Parameters ---------- f : function name : str Name of injectable. cache_scope : {'step', 'iteration', 'forever'}, optional Scope for which to cache data. Default is to cache forever (or until manually cleared). 'iteration' caches data for each complete iteration of the pipeline, 'step' caches data for a single step of the pipeline.
[ "Wraps", "a", "function", "for", "memoization", "and", "ties", "it", "s", "cache", "into", "the", "Orca", "cacheing", "system", "." ]
python
train
29.52381
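The cache key built inside wrapper is worth isolating: positional args go in as a tuple and keyword args as a frozenset of items, so any unhashable argument raises TypeError when the key is used. A minimal sketch of the same construction (the helper name is hypothetical):

def make_key(args, kwargs):
    # mirrors wrapper's cache_key; () and {} both normalize to None
    return (args or None, frozenset(kwargs.items()) if kwargs else None)

cache = {}
cache[make_key((1, 2), {'a': 3})] = 'result'
assert make_key((1, 2), {'a': 3}) == ((1, 2), frozenset({('a', 3)}))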
sirfoga/pyhal
hal/wrappers/errors.py
https://github.com/sirfoga/pyhal/blob/4394d8a1f7e45bea28a255ec390f4962ee64d33a/hal/wrappers/errors.py#L8-L32
def true_false_returns(func):
    """Executes function, if error returns False, else True

    :param func: function to call
    :return: True iff ok, else False
    """

    @functools.wraps(func)
    def _execute(*args, **kwargs):
        """Executes function, if error returns False, else True

        :param args: args of function
        :param kwargs: extra args of function
        :param *args: args
        :param **kwargs: extra args
        :return: True iff ok, else False
        """
        try:
            func(*args, **kwargs)
            return True
        except:
            return False

    return _execute
[ "def", "true_false_returns", "(", "func", ")", ":", "@", "functools", ".", "wraps", "(", "func", ")", "def", "_execute", "(", "*", "args", ",", "*", "*", "kwargs", ")", ":", "\"\"\"Executes function, if error returns False, else True\n\n :param args: args of fu...
Executes function, if error returns False, else True :param func: function to call :return: True iff ok, else False
[ "Executes", "function", "if", "error", "returns", "False", "else", "True" ]
python
train
24.28
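A usage sketch for the decorator above; the decorated function's own return value is discarded, and only success or failure is reported:

@true_false_returns
def parse_int(s):
    int(s)  # raises ValueError on bad input

assert parse_int('42') is True
assert parse_int('not a number') is False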
chaoss/grimoirelab-manuscripts
manuscripts2/elasticsearch.py
https://github.com/chaoss/grimoirelab-manuscripts/blob/94a3ad4f11bfbcd6c5190e01cb5d3e47a5187cd9/manuscripts2/elasticsearch.py#L247-L259
def add_custom_aggregation(self, agg, name=None):
    """
    Takes in an es_dsl Aggregation object and adds it to the aggregation dict.
    Can be used to add custom aggregations such as moving averages

    :param agg: aggregation to be added to the es_dsl search object
    :param name: name of the aggregation object (optional)
    :returns: self, which allows the method to be chainable with the other methods
    """
    agg_name = name if name else 'custom_agg'
    self.aggregations[agg_name] = agg
    return self
[ "def", "add_custom_aggregation", "(", "self", ",", "agg", ",", "name", "=", "None", ")", ":", "agg_name", "=", "name", "if", "name", "else", "'custom_agg'", "self", ".", "aggregations", "[", "agg_name", "]", "=", "agg", "return", "self" ]
Takes in an es_dsl Aggregation object and adds it to the aggregation dict. Can be used to add custom aggregations such as moving averages :param agg: aggregation to be added to the es_dsl search object :param name: name of the aggregation object (optional) :returns: self, which allows the method to be chainable with the other methods
[ "Takes", "in", "an", "es_dsl", "Aggregation", "object", "and", "adds", "it", "to", "the", "aggregation", "dict", ".", "Can", "be", "used", "to", "add", "custom", "aggregations", "such", "as", "moving", "averages" ]
python
train
42.384615
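A hedged sketch of passing an elasticsearch-dsl aggregation to this method; the receiving `query` object and the 'commits' buckets path are assumptions, and the moving_avg pipeline aggregation assumes an Elasticsearch version that still supports it.

    from elasticsearch_dsl import A

    # Build a moving-average pipeline aggregation over a 'commits' bucket path.
    moving_avg = A('moving_avg', buckets_path='commits')
    # Chainable: the method stores the agg under the given name and returns self.
    query = query.add_custom_aggregation(moving_avg, name='commits_moving_avg')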
array-split/array_split
array_split/split.py
https://github.com/array-split/array_split/blob/e07abe3001209394dde809f7e6f505f9f49a1c26/array_split/split.py#L795-L807
def convert_halo_to_array_form(self, halo): """ Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)` shaped array. :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array of :samp:`int` :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form. :rtype: :obj:`numpy.ndarray` :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements. """ return convert_halo_to_array_form(halo=halo, ndim=len(self.array_shape))
[ "def", "convert_halo_to_array_form", "(", "self", ",", "halo", ")", ":", "return", "convert_halo_to_array_form", "(", "halo", "=", "halo", ",", "ndim", "=", "len", "(", "self", ".", "array_shape", ")", ")" ]
Converts the :samp:`{halo}` argument to a :samp:`({self}.array_shape.size, 2)` shaped array. :type halo: :samp:`None`, :obj:`int`, :samp:`self.array_shape.size` length sequence of :samp:`int` or :samp:`(self.array_shape.size, 2)` shaped array of :samp:`int` :param halo: Halo to be converted to :samp:`(len(self.array_shape), 2)` shaped array form. :rtype: :obj:`numpy.ndarray` :return: A :samp:`(len(self.array_shape), 2)` shaped array of :obj:`numpy.int64` elements.
[ "Converts", "the", ":", "samp", ":", "{", "halo", "}", "argument", "to", "a", ":", "samp", ":", "(", "{", "self", "}", ".", "array_shape", ".", "size", "2", ")", "shaped", "array", "." ]
python
train
52.153846
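Given the delegation to the module-level helper, an int halo should broadcast to a (ndim, 2) array; a sketch (output shown as expected per the docstring, not verified here):

    from array_split.split import convert_halo_to_array_form

    print(convert_halo_to_array_form(halo=2, ndim=3))
    # Expected:
    # [[2 2]
    #  [2 2]
    #  [2 2]]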
poldracklab/niworkflows
niworkflows/utils/bids.py
https://github.com/poldracklab/niworkflows/blob/254f4b4fcc5e6ecb29d2f4602a30786b913ecce5/niworkflows/utils/bids.py#L44-L118
def collect_participants(bids_dir, participant_label=None, strict=False, bids_validate=True): """ List the participants under the BIDS root and checks that participants designated with the participant_label argument exist in that folder. Returns the list of participants to be finally processed. Requesting all subjects in a BIDS directory root: >>> collect_participants(str(datadir / 'ds114'), bids_validate=False) ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10'] Requesting two subjects, given their IDs: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, given their IDs (works with 'sub-' prefixes): >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, but one does not exist: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'], ... bids_validate=False) ['02'] >>> collect_participants( ... str(datadir / 'ds114'), participant_label=['02', '14'], ... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): fmriprep.utils.bids.BIDSError: ... """ if isinstance(bids_dir, BIDSLayout): layout = bids_dir else: layout = BIDSLayout(str(bids_dir), validate=bids_validate) all_participants = set(layout.get_subjects()) # Error: bids_dir does not contain subjects if not all_participants: raise BIDSError( 'Could not find participants. Please make sure the BIDS data ' 'structure is present and correct. Datasets can be validated online ' 'using the BIDS Validator (http://bids-standard.github.io/bids-validator/).\n' 'If you are using Docker for Mac or Docker for Windows, you ' 'may need to adjust your "File sharing" preferences.', bids_dir) # No --participant-label was set, return all if not participant_label: return sorted(all_participants) if isinstance(participant_label, str): participant_label = [participant_label] # Drop sub- prefixes participant_label = [sub[4:] if sub.startswith('sub-') else sub for sub in participant_label] # Remove duplicates participant_label = sorted(set(participant_label)) # Remove labels not found found_label = sorted(set(participant_label) & all_participants) if not found_label: raise BIDSError('Could not find participants [{}]'.format( ', '.join(participant_label)), bids_dir) # Warn if some IDs were not found notfound_label = sorted(set(participant_label) - all_participants) if notfound_label: exc = BIDSError('Some participants were not found: {}'.format( ', '.join(notfound_label)), bids_dir) if strict: raise exc warnings.warn(exc.msg, BIDSWarning) return found_label
[ "def", "collect_participants", "(", "bids_dir", ",", "participant_label", "=", "None", ",", "strict", "=", "False", ",", "bids_validate", "=", "True", ")", ":", "if", "isinstance", "(", "bids_dir", ",", "BIDSLayout", ")", ":", "layout", "=", "bids_dir", "els...
List the participants under the BIDS root and checks that participants designated with the participant_label argument exist in that folder. Returns the list of participants to be finally processed. Requesting all subjects in a BIDS directory root: >>> collect_participants(str(datadir / 'ds114'), bids_validate=False) ['01', '02', '03', '04', '05', '06', '07', '08', '09', '10'] Requesting two subjects, given their IDs: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, given their IDs (works with 'sub-' prefixes): >>> collect_participants(str(datadir / 'ds114'), participant_label=['sub-02', 'sub-04'], ... bids_validate=False) ['02', '04'] Requesting two subjects, but one does not exist: >>> collect_participants(str(datadir / 'ds114'), participant_label=['02', '14'], ... bids_validate=False) ['02'] >>> collect_participants( ... str(datadir / 'ds114'), participant_label=['02', '14'], ... strict=True, bids_validate=False) # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): fmriprep.utils.bids.BIDSError: ...
[ "List", "the", "participants", "under", "the", "BIDS", "root", "and", "checks", "that", "participants", "designated", "with", "the", "participant_label", "argument", "exist", "in", "that", "folder", ".", "Returns", "the", "list", "of", "participants", "to", "be"...
python
train
41.053333
andreikop/qutepart
qutepart/brackethlighter.py
https://github.com/andreikop/qutepart/blob/109d76b239751318bcef06f39b2fbbf18687a40b/qutepart/brackethlighter.py#L75-L96
def _findMatchingBracket(self, bracket, qpart, block, columnIndex):
    """Find the matching bracket for the given bracket.
    Return (block, columnIndex) or (None, None).
    Raise _TimeoutException if the time limit is exceeded
    """
    if bracket in self._START_BRACKETS:
        charsGenerator = self._iterateDocumentCharsForward(block, columnIndex + 1)
    else:
        charsGenerator = self._iterateDocumentCharsBackward(block, columnIndex)

    depth = 1
    oposite = self._OPOSITE_BRACKET[bracket]
    for block, columnIndex, char in charsGenerator:
        if qpart.isCode(block, columnIndex):
            if char == oposite:
                depth -= 1
                if depth == 0:
                    return block, columnIndex
            elif char == bracket:
                depth += 1
    else:
        return None, None
[ "def", "_findMatchingBracket", "(", "self", ",", "bracket", ",", "qpart", ",", "block", ",", "columnIndex", ")", ":", "if", "bracket", "in", "self", ".", "_START_BRACKETS", ":", "charsGenerator", "=", "self", ".", "_iterateDocumentCharsForward", "(", "block", ...
Find the matching bracket for the given bracket.
Return (block, columnIndex) or (None, None).
Raise _TimeoutException if the time limit is exceeded
[ "Find", "matching", "bracket", "for", "the", "bracket", ".", "Return", "(", "block", "columnIndex", ")", "or", "(", "None", "None", ")", "Raise", "_TimeoutException", "if", "time", "is", "over" ]
python
train
39.818182
PyGithub/PyGithub
github/PullRequest.py
https://github.com/PyGithub/PyGithub/blob/f716df86bbe7dc276c6596699fa9712b61ef974c/github/PullRequest.py#L754-L778
def merge(self, commit_message=github.GithubObject.NotSet, commit_title=github.GithubObject.NotSet, merge_method=github.GithubObject.NotSet, sha=github.GithubObject.NotSet): """ :calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_ :param commit_message: string :rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus` """ assert commit_message is github.GithubObject.NotSet or isinstance(commit_message, (str, unicode)), commit_message assert commit_title is github.GithubObject.NotSet or isinstance(commit_title, (str, unicode)), commit_title assert merge_method is github.GithubObject.NotSet or isinstance(merge_method, (str, unicode)), merge_method assert sha is github.GithubObject.NotSet or isinstance(sha, (str, unicode)), sha post_parameters = dict() if commit_message is not github.GithubObject.NotSet: post_parameters["commit_message"] = commit_message if commit_title is not github.GithubObject.NotSet: post_parameters["commit_title"] = commit_title if merge_method is not github.GithubObject.NotSet: post_parameters["merge_method"] = merge_method if sha is not github.GithubObject.NotSet: post_parameters["sha"] = sha headers, data = self._requester.requestJsonAndCheck( "PUT", self.url + "/merge", input=post_parameters ) return github.PullRequestMergeStatus.PullRequestMergeStatus(self._requester, headers, data, completed=True)
[ "def", "merge", "(", "self", ",", "commit_message", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "commit_title", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "merge_method", "=", "github", ".", "GithubObject", ".", "NotSet", ",", "sha",...
:calls: `PUT /repos/:owner/:repo/pulls/:number/merge <http://developer.github.com/v3/pulls>`_ :param commit_message: string :rtype: :class:`github.PullRequestMergeStatus.PullRequestMergeStatus`
[ ":", "calls", ":", "PUT", "/", "repos", "/", ":", "owner", "/", ":", "repo", "/", "pulls", "/", ":", "number", "/", "merge", "<http", ":", "//", "developer", ".", "github", ".", "com", "/", "v3", "/", "pulls", ">", "_", ":", "param", "commit_mess...
python
train
63.64
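A usage sketch against PyGithub's public API; the token, repository and pull number are placeholders.

    from github import Github

    gh = Github("<token>")                                  # placeholder credentials
    pr = gh.get_repo("octocat/Hello-World").get_pull(1)
    status = pr.merge(commit_title="Merge #1",
                      commit_message="Merged via the API",
                      merge_method="squash")
    print(status.merged, status.sha)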
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/tex.py#L495-L560
def is_LaTeX(flist,env,abspath): """Scan a file list to decide if it's TeX- or LaTeX-flavored.""" # We need to scan files that are included in case the # \documentclass command is in them. # get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS'] savedpath = modify_env_var(env, 'TEXINPUTS', abspath) paths = env['ENV']['TEXINPUTS'] if SCons.Util.is_List(paths): pass else: # Split at os.pathsep to convert into absolute path paths = paths.split(os.pathsep) # now that we have the path list restore the env if savedpath is _null: try: del env['ENV']['TEXINPUTS'] except KeyError: pass # was never set else: env['ENV']['TEXINPUTS'] = savedpath if Verbose: print("is_LaTeX search path ",paths) print("files to search :",flist) # Now that we have the search path and file list, check each one for f in flist: if Verbose: print(" checking for Latex source ",str(f)) content = f.get_text_contents() if LaTeX_re.search(content): if Verbose: print("file %s is a LaTeX file" % str(f)) return 1 if Verbose: print("file %s is not a LaTeX file" % str(f)) # now find included files inc_files = [ ] inc_files.extend( include_re.findall(content) ) if Verbose: print("files included by '%s': "%str(f),inc_files) # inc_files is list of file names as given. need to find them # using TEXINPUTS paths. # search the included files for src in inc_files: srcNode = FindFile(src,['.tex','.ltx','.latex'],paths,env,requireExt=False) # make this a list since is_LaTeX takes a list. fileList = [srcNode,] if Verbose: print("FindFile found ",srcNode) if srcNode is not None: file_test = is_LaTeX(fileList, env, abspath) # return on first file that finds latex is needed. if file_test: return file_test if Verbose: print(" done scanning ",str(f)) return 0
[ "def", "is_LaTeX", "(", "flist", ",", "env", ",", "abspath", ")", ":", "# We need to scan files that are included in case the", "# \\documentclass command is in them.", "# get path list from both env['TEXINPUTS'] and env['ENV']['TEXINPUTS']", "savedpath", "=", "modify_env_var", "(", ...
Scan a file list to decide if it's TeX- or LaTeX-flavored.
[ "Scan", "a", "file", "list", "to", "decide", "if", "it", "s", "TeX", "-", "or", "LaTeX", "-", "flavored", "." ]
python
train
32.69697
KimiNewt/pyshark
src/pyshark/capture/inmem_capture.py
https://github.com/KimiNewt/pyshark/blob/089ea6208c4321f03bc548f491e00a053285918f/src/pyshark/capture/inmem_capture.py#L96-L118
def parse_packets(self, binary_packets):
    """
    Parses binary packets and returns a list of parsed packets.

    DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done working with it.
    """
    if not binary_packets:
        raise ValueError("Must supply at least one packet")
    parsed_packets = []
    if not self._current_tshark:
        self.eventloop.run_until_complete(self._get_tshark_process())
    for binary_packet in binary_packets:
        self._write_packet(binary_packet)

    def callback(pkt):
        parsed_packets.append(pkt)
        if len(parsed_packets) == len(binary_packets):
            raise StopCapture()

    self.eventloop.run_until_complete(self._get_parsed_packet_from_tshark(callback))
    return parsed_packets
[ "def", "parse_packets", "(", "self", ",", "binary_packets", ")", ":", "if", "not", "binary_packets", ":", "raise", "ValueError", "(", "\"Must supply at least one packet\"", ")", "parsed_packets", "=", "[", "]", "if", "not", "self", ".", "_current_tshark", ":", "...
Parses binary packets and returns a list of parsed packets.

DOES NOT CLOSE tshark. It must be closed manually by calling close() when you're done working with it.
[ "Parses", "binary", "packets", "and", "return", "a", "list", "of", "parsed", "packets", "." ]
python
train
36.391304
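A hedged sketch of feeding raw frames to an InMemCapture; the byte strings stand in for real link-layer frames.

    from pyshark import InMemCapture

    cap = InMemCapture()                          # defaults to an Ethernet link type
    frames = [b"<raw frame 1>", b"<raw frame 2>"] # placeholders for real binary frames
    for pkt in cap.parse_packets(frames):
        print(pkt.highest_layer)
    cap.close()                                   # tshark must be closed manually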
angr/angr
angr/state_plugins/heap/heap_base.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/state_plugins/heap/heap_base.py#L68-L76
def _malloc(self, sim_size): """ Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be implemented in a `malloc` function (as opposed to the `_malloc` function). :param sim_size: the amount of memory (in bytes) to be allocated """ raise NotImplementedError("%s not implemented for %s" % (self._malloc.__func__.__name__, self.__class__.__name__))
[ "def", "_malloc", "(", "self", ",", "sim_size", ")", ":", "raise", "NotImplementedError", "(", "\"%s not implemented for %s\"", "%", "(", "self", ".", "_malloc", ".", "__func__", ".", "__name__", ",", "self", ".", "__class__", ".", "__name__", ")", ")" ]
Handler for any libc `malloc` SimProcedure call. If the heap has faithful support for `malloc`, it ought to be implemented in a `malloc` function (as opposed to the `_malloc` function). :param sim_size: the amount of memory (in bytes) to be allocated
[ "Handler", "for", "any", "libc", "malloc", "SimProcedure", "call", ".", "If", "the", "heap", "has", "faithful", "support", "for", "malloc", "it", "ought", "to", "be", "implemented", "in", "a", "malloc", "function", "(", "as", "opposed", "to", "the", "_mall...
python
train
56.444444
tensorflow/tensorboard
tensorboard/plugins/hparams/backend_context.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/hparams/backend_context.py#L280-L295
def _protobuf_value_type(value): """Returns the type of the google.protobuf.Value message as an api.DataType. Returns None if the type of 'value' is not one of the types supported in api_pb2.DataType. Args: value: google.protobuf.Value message. """ if value.HasField("number_value"): return api_pb2.DATA_TYPE_FLOAT64 if value.HasField("string_value"): return api_pb2.DATA_TYPE_STRING if value.HasField("bool_value"): return api_pb2.DATA_TYPE_BOOL return None
[ "def", "_protobuf_value_type", "(", "value", ")", ":", "if", "value", ".", "HasField", "(", "\"number_value\"", ")", ":", "return", "api_pb2", ".", "DATA_TYPE_FLOAT64", "if", "value", ".", "HasField", "(", "\"string_value\"", ")", ":", "return", "api_pb2", "."...
Returns the type of the google.protobuf.Value message as an api.DataType. Returns None if the type of 'value' is not one of the types supported in api_pb2.DataType. Args: value: google.protobuf.Value message.
[ "Returns", "the", "type", "of", "the", "google", ".", "protobuf", ".", "Value", "message", "as", "an", "api", ".", "DataType", "." ]
python
train
29.9375
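A behavioural sketch with a real google.protobuf Value; the function itself is private to the hparams plugin, so the mapping is shown as comments.

    from google.protobuf import struct_pb2

    v = struct_pb2.Value(number_value=3.14)
    # _protobuf_value_type(v) -> api_pb2.DATA_TYPE_FLOAT64
    v = struct_pb2.Value(string_value="adam")
    # _protobuf_value_type(v) -> api_pb2.DATA_TYPE_STRING
    v = struct_pb2.Value()          # no field set
    # _protobuf_value_type(v) -> None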
vaexio/vaex
packages/vaex-astro/vaex/astro/transformations.py
https://github.com/vaexio/vaex/blob/a45b672f8287afca2ada8e36b74b604b9b28dd85/packages/vaex-astro/vaex/astro/transformations.py#L5-L9
def patch(f): '''Adds method f to the Dataset class''' name = f.__name__ Dataset.__hidden__[name] = f return f
[ "def", "patch", "(", "f", ")", ":", "name", "=", "f", ".", "__name__", "Dataset", ".", "__hidden__", "[", "name", "]", "=", "f", "return", "f" ]
Adds method f to the Dataset class
[ "Adds", "method", "f", "to", "the", "Dataset", "class" ]
python
test
24.4
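How the decorator is meant to be used; `my_transform` is hypothetical. Note that the function is registered in Dataset.__hidden__ rather than set as a plain attribute.

    @patch
    def my_transform(self):
        """Hypothetical method registered on the Dataset class."""
        return self

    assert Dataset.__hidden__['my_transform'] is my_transform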
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L388-L508
def load_models(context: mx.context.Context, max_input_len: Optional[int], beam_size: int, batch_size: int, model_folders: List[str], checkpoints: Optional[List[int]] = None, softmax_temperature: Optional[float] = None, max_output_length_num_stds: int = C.DEFAULT_NUM_STD_MAX_OUTPUT_LENGTH, decoder_return_logit_inputs: bool = False, cache_output_layer_w_b: bool = False, forced_max_output_len: Optional[int] = None, override_dtype: Optional[str] = None, output_scores: bool = False, sampling: bool = False) -> Tuple[List[InferenceModel], List[vocab.Vocab], vocab.Vocab]: """ Loads a list of models for inference. :param context: MXNet context to bind modules to. :param max_input_len: Maximum input length. :param beam_size: Beam size. :param batch_size: Batch size. :param model_folders: List of model folders to load models from. :param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint. :param softmax_temperature: Optional parameter to control steepness of softmax distribution. :param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio to compute maximum output length. :param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target vocabulary. Used when logits/softmax are handled separately. :param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with restrict lexicon). :param forced_max_output_len: An optional overwrite of the maximum output length. :param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one. :param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1 and a single model. :param sampling: True if the model is sampling instead of doing normal topk(). :return: List of models, source vocabulary, target vocabulary, source factor vocabularies. """ logger.info("Loading %d model(s) from %s ...", len(model_folders), model_folders) load_time_start = time.time() models = [] # type: List[InferenceModel] source_vocabs = [] # type: List[List[vocab.Vocab]] target_vocabs = [] # type: List[vocab.Vocab] if checkpoints is None: checkpoints = [None] * len(model_folders) else: utils.check_condition(len(checkpoints) == len(model_folders), "Must provide checkpoints for each model") skip_softmax = False # performance tweak: skip softmax for a single model, decoding with beam size 1, when not sampling and no scores are required in output. 
if len(model_folders) == 1 and beam_size == 1 and not output_scores and not sampling: skip_softmax = True logger.info("Enabled skipping softmax for a single model and greedy decoding.") for model_folder, checkpoint in zip(model_folders, checkpoints): model_source_vocabs = vocab.load_source_vocabs(model_folder) model_target_vocab = vocab.load_target_vocab(model_folder) source_vocabs.append(model_source_vocabs) target_vocabs.append(model_target_vocab) model_version = utils.load_version(os.path.join(model_folder, C.VERSION_NAME)) logger.info("Model version: %s", model_version) utils.check_version(model_version) model_config = model.SockeyeModel.load_config(os.path.join(model_folder, C.CONFIG_NAME)) logger.info("Disabling dropout layers for performance reasons") model_config.disable_dropout() if override_dtype is not None: model_config.config_encoder.dtype = override_dtype model_config.config_decoder.dtype = override_dtype if override_dtype == C.DTYPE_FP16: logger.warning('Experimental feature \'override_dtype=float16\' has been used. ' 'This feature may be removed or change its behaviour in future. ' 'DO NOT USE IT IN PRODUCTION!') if checkpoint is None: params_fname = os.path.join(model_folder, C.PARAMS_BEST_NAME) else: params_fname = os.path.join(model_folder, C.PARAMS_NAME % checkpoint) inference_model = InferenceModel(config=model_config, params_fname=params_fname, context=context, beam_size=beam_size, softmax_temperature=softmax_temperature, decoder_return_logit_inputs=decoder_return_logit_inputs, cache_output_layer_w_b=cache_output_layer_w_b, skip_softmax=skip_softmax) utils.check_condition(inference_model.num_source_factors == len(model_source_vocabs), "Number of loaded source vocabularies (%d) does not match " "number of source factors for model '%s' (%d)" % (len(model_source_vocabs), model_folder, inference_model.num_source_factors)) models.append(inference_model) utils.check_condition(vocab.are_identical(*target_vocabs), "Target vocabulary ids do not match") first_model_vocabs = source_vocabs[0] for fi in range(len(first_model_vocabs)): utils.check_condition(vocab.are_identical(*[source_vocabs[i][fi] for i in range(len(source_vocabs))]), "Source vocabulary ids do not match. Factor %d" % fi) source_with_eos = models[0].source_with_eos utils.check_condition(all(source_with_eos == m.source_with_eos for m in models), "All models must agree on using source-side EOS symbols or not. " "Did you try combining models trained with different versions?") # set a common max_output length for all models. max_input_len, get_max_output_length = models_max_input_output_length(models, max_output_length_num_stds, max_input_len, forced_max_output_len=forced_max_output_len) for inference_model in models: inference_model.initialize(batch_size, max_input_len, get_max_output_length) load_time = time.time() - load_time_start logger.info("%d model(s) loaded in %.4fs", len(models), load_time) return models, source_vocabs[0], target_vocabs[0]
[ "def", "load_models", "(", "context", ":", "mx", ".", "context", ".", "Context", ",", "max_input_len", ":", "Optional", "[", "int", "]", ",", "beam_size", ":", "int", ",", "batch_size", ":", "int", ",", "model_folders", ":", "List", "[", "str", "]", ",...
Loads a list of models for inference. :param context: MXNet context to bind modules to. :param max_input_len: Maximum input length. :param beam_size: Beam size. :param batch_size: Batch size. :param model_folders: List of model folders to load models from. :param checkpoints: List of checkpoints to use for each model in model_folders. Use None to load best checkpoint. :param softmax_temperature: Optional parameter to control steepness of softmax distribution. :param max_output_length_num_stds: Number of standard deviations to add to mean target-source length ratio to compute maximum output length. :param decoder_return_logit_inputs: Model decoders return inputs to logit computation instead of softmax over target vocabulary. Used when logits/softmax are handled separately. :param cache_output_layer_w_b: Models cache weights and biases for logit computation as NumPy arrays (used with restrict lexicon). :param forced_max_output_len: An optional overwrite of the maximum output length. :param override_dtype: Overrides dtype of encoder and decoder defined at training time to a different one. :param output_scores: Whether the scores will be needed as outputs. If True, scores will be normalized, negative log probabilities. If False, scores will be negative, raw logit activations if decoding with beam size 1 and a single model. :param sampling: True if the model is sampling instead of doing normal topk(). :return: List of models, source vocabulary, target vocabulary, source factor vocabularies.
[ "Loads", "a", "list", "of", "models", "for", "inference", "." ]
python
train
59.22314
AshleySetter/optoanalysis
optoanalysis/optoanalysis/optoanalysis.py
https://github.com/AshleySetter/optoanalysis/blob/9d390acc834d70024d47b574aea14189a5a5714e/optoanalysis/optoanalysis/optoanalysis.py#L770-L827
def calc_gamma_from_energy_autocorrelation_fit(self, GammaGuess=None, silent=False, MakeFig=True, show_fig=True):
    """
    Calculates the total damping, i.e. Gamma, by calculating the energy at each point in time. This energy array is then used for the autocorrelation. The autocorrelation is fitted with an exponential relaxation function and the function returns the parameters with errors.

    Parameters
    ----------
    GammaGuess : float, optional
        Initial guess for BigGamma (in radians)
    silent : bool, optional
        Whether it prints the values fitted or is silent.
    MakeFig : bool, optional
        Whether to construct and return the figure object showing the fitting. defaults to True
    show_fig : bool, optional
        Whether to show the figure object when it has been created. defaults to True

    Returns
    -------
    Gamma : ufloat
        Big Gamma, the total damping in radians
    fig : matplotlib.figure.Figure object
        The figure object created showing the autocorrelation of the data with the fit
    ax : matplotlib.axes.Axes object
        The axes object created showing the autocorrelation of the data with the fit

    """
    autocorrelation = calc_autocorrelation(self.voltage[:-1]**2*self.OmegaTrap.n**2+(_np.diff(self.voltage)*self.SampleFreq)**2)
    time = self.time.get_array()[:len(autocorrelation)]

    if GammaGuess==None:
        Gamma_Initial = (time[4]-time[0])/(autocorrelation[0]-autocorrelation[4])
    else:
        Gamma_Initial = GammaGuess

    if MakeFig == True:
        Params, ParamsErr, fig, ax = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)
    else:
        Params, ParamsErr, _ , _ = fit_autocorrelation(
            autocorrelation, time, Gamma_Initial, MakeFig=MakeFig, show_fig=show_fig)

    if silent == False:
        print("\n")
        print(
            "Big Gamma: {} +- {}% ".format(Params[0], ParamsErr[0] / Params[0] * 100))

    Gamma = _uncertainties.ufloat(Params[0], ParamsErr[0])

    if MakeFig == True:
        return Gamma, fig, ax
    else:
        return Gamma, None, None
[ "def", "calc_gamma_from_energy_autocorrelation_fit", "(", "self", ",", "GammaGuess", "=", "None", ",", "silent", "=", "False", ",", "MakeFig", "=", "True", ",", "show_fig", "=", "True", ")", ":", "autocorrelation", "=", "calc_autocorrelation", "(", "self", ".", ...
Calculates the total damping, i.e. Gamma, by calculating the energy at
each point in time. This energy array is then used for the
autocorrelation. The autocorrelation is fitted with an exponential
relaxation function and the function returns the parameters with errors.

Parameters
----------
GammaGuess : float, optional
    Initial guess for BigGamma (in radians)
silent : bool, optional
    Whether it prints the values fitted or is silent.
MakeFig : bool, optional
    Whether to construct and return the figure object showing the
    fitting. defaults to True
show_fig : bool, optional
    Whether to show the figure object when it has been created.
    defaults to True

Returns
-------
Gamma : ufloat
    Big Gamma, the total damping in radians
fig : matplotlib.figure.Figure object
    The figure object created showing the autocorrelation
    of the data with the fit
ax : matplotlib.axes.Axes object
    The axes object created showing the autocorrelation
    of the data with the fit
[ "Calculates", "the", "total", "damping", "i", ".", "e", ".", "Gamma", "by", "calculating", "the", "energy", "each", "point", "in", "time", ".", "This", "energy", "array", "is", "then", "used", "for", "the", "autocorrleation", ".", "The", "autocorrelation", ...
python
train
40.448276
saltstack/salt
salt/modules/git.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/git.py#L48-L60
def _check_worktree_support(failhard=True): ''' Ensure that we don't try to operate on worktrees in git < 2.5.0. ''' git_version = version(versioninfo=False) if _LooseVersion(git_version) < _LooseVersion('2.5.0'): if failhard: raise CommandExecutionError( 'Worktrees are only supported in git 2.5.0 and newer ' '(detected git version: ' + git_version + ')' ) return False return True
[ "def", "_check_worktree_support", "(", "failhard", "=", "True", ")", ":", "git_version", "=", "version", "(", "versioninfo", "=", "False", ")", "if", "_LooseVersion", "(", "git_version", ")", "<", "_LooseVersion", "(", "'2.5.0'", ")", ":", "if", "failhard", ...
Ensure that we don't try to operate on worktrees in git < 2.5.0.
[ "Ensure", "that", "we", "don", "t", "try", "to", "operate", "on", "worktrees", "in", "git", "<", "2", ".", "5", ".", "0", "." ]
python
train
35.923077
ray-project/ray
python/ray/tune/suggest/sigopt.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/tune/suggest/sigopt.py#L95-L119
def on_trial_complete(self, trial_id, result=None, error=False, early_terminated=False): """Passes the result to SigOpt unless early terminated or errored. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial. """ if result: self.conn.experiments(self.experiment.id).observations().create( suggestion=self._live_trial_mapping[trial_id].id, value=result[self._reward_attr], ) # Update the experiment object self.experiment = self.conn.experiments(self.experiment.id).fetch() elif error or early_terminated: # Reports a failed Observation self.conn.experiments(self.experiment.id).observations().create( failed=True, suggestion=self._live_trial_mapping[trial_id].id) del self._live_trial_mapping[trial_id]
[ "def", "on_trial_complete", "(", "self", ",", "trial_id", ",", "result", "=", "None", ",", "error", "=", "False", ",", "early_terminated", "=", "False", ")", ":", "if", "result", ":", "self", ".", "conn", ".", "experiments", "(", "self", ".", "experiment...
Passes the result to SigOpt unless early terminated or errored. If a trial fails, it will be reported as a failed Observation, telling the optimizer that the Suggestion led to a metric failure, which updates the feasible region and improves parameter recommendation. Creates SigOpt Observation object for trial.
[ "Passes", "the", "result", "to", "SigOpt", "unless", "early", "terminated", "or", "errored", "." ]
python
train
46.84
secdev/scapy
scapy/layers/sixlowpan.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/layers/sixlowpan.py#L751-L778
def sixlowpan_fragment(packet, datagram_tag=1): """Split a packet into different links to transmit as 6lowpan packets. Usage example: >>> ipv6 = ..... (very big packet) >>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17) >>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts] >>> wireshark(send) """ if not packet.haslayer(IPv6): raise Exception("SixLoWPAN only fragments IPv6 packets !") str_packet = raw(packet[IPv6]) if len(str_packet) <= MAX_SIZE: return [packet] def chunks(l, n): return [l[i:i + n] for i in range(0, len(l), n)] new_packet = chunks(str_packet, MAX_SIZE) new_packet[0] = LoWPANFragmentationFirst(datagramTag=datagram_tag, datagramSize=len(str_packet)) / new_packet[0] # noqa: E501 i = 1 while i < len(new_packet): new_packet[i] = LoWPANFragmentationSubsequent(datagramTag=datagram_tag, datagramSize=len(str_packet), datagramOffset=MAX_SIZE // 8 * i) / new_packet[i] # noqa: E501 i += 1 return new_packet
[ "def", "sixlowpan_fragment", "(", "packet", ",", "datagram_tag", "=", "1", ")", ":", "if", "not", "packet", ".", "haslayer", "(", "IPv6", ")", ":", "raise", "Exception", "(", "\"SixLoWPAN only fragments IPv6 packets !\"", ")", "str_packet", "=", "raw", "(", "p...
Split a packet into different links to transmit as 6lowpan packets. Usage example: >>> ipv6 = ..... (very big packet) >>> pkts = sixlowpan_fragment(ipv6, datagram_tag=0x17) >>> send = [Dot15d4()/Dot15d4Data()/x for x in pkts] >>> wireshark(send)
[ "Split", "a", "packet", "into", "different", "links", "to", "transmit", "as", "6lowpan", "packets", ".", "Usage", "example", ":", ">>>", "ipv6", "=", ".....", "(", "very", "big", "packet", ")", ">>>", "pkts", "=", "sixlowpan_fragment", "(", "ipv6", "datagr...
python
train
37.107143
chimera0/accel-brain-code
Automatic-Summarization/pysummarization/similarity_filter.py
https://github.com/chimera0/accel-brain-code/blob/03661f6f544bed656269fcd4b3c23c9061629daa/Automatic-Summarization/pysummarization/similarity_filter.py#L39-L43
def set_similarity_limit(self, value): ''' setter ''' if isinstance(value, float) is False: raise TypeError("__similarity_limit must be float.") self.__similarity_limit = value
[ "def", "set_similarity_limit", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "float", ")", "is", "False", ":", "raise", "TypeError", "(", "\"__similarity_limit must be float.\"", ")", "self", ".", "__similarity_limit", "=", "value" ...
setter
[ "setter" ]
python
train
41.6
eng-tools/sfsimodels
sfsimodels/models/soils.py
https://github.com/eng-tools/sfsimodels/blob/65a690ca440d61307f5a9b8478e4704f203a5925/sfsimodels/models/soils.py#L121-L127
def reset_all(self): """ Resets all parameters to None """ for item in self.inputs: setattr(self, "_%s" % item, None) self.stack = []
[ "def", "reset_all", "(", "self", ")", ":", "for", "item", "in", "self", ".", "inputs", ":", "setattr", "(", "self", ",", "\"_%s\"", "%", "item", ",", "None", ")", "self", ".", "stack", "=", "[", "]" ]
Resets all parameters to None
[ "Resets", "all", "parameters", "to", "None" ]
python
train
25.571429
dereneaton/ipyrad
ipyrad/assemble/util.py
https://github.com/dereneaton/ipyrad/blob/5eeb8a178160f45faf71bf47cec4abe998a575d1/ipyrad/assemble/util.py#L521-L719
def merge_pairs(data, two_files, merged_out, revcomp, merge): """ Merge PE reads. Takes in a list of unmerged files [r1, r2] and the filehandle to write merged data to, and it returns the number of reads that were merged (overlapping). If merge==0 then only concat pairs (nnnn), no merging in vsearch. Parameters ----------- two_files (tuple): A list or tuple of the [r1, r2] files to be merged. merged_out (str): A string file handle for the merged data to be written to. revcomp (bool): Whether or not to revcomp the R2s. merge (bool): Whether or not to perform vsearch merging. If not then reads are simply concatenated with a 'nnnn' separator. Returns -------- If merge is on then the func will return the number of pairs successfully merged, else it returns -1. """ LOGGER.debug("Entering merge_pairs()") ## Return the number of merged pairs nmerged = -1 ## Check input files from inside list-tuple [(r1, r2)] for fhandle in two_files[0]: if not os.path.exists(fhandle): raise IPyradWarningExit(""" Attempting to merge a file that doesn't exist - {}""".format(fhandle)) ## If it already exists, clean up the old merged file if os.path.exists(merged_out): os.remove(merged_out) ## if merge then catch nonmerged in a separate file if merge: nonmerged1 = tempfile.NamedTemporaryFile(mode='wb', dir=data.dirs.edits, suffix="_nonmerged_R1_.fastq").name nonmerged2 = tempfile.NamedTemporaryFile(mode='wb', dir=data.dirs.edits, suffix="_nonmerged_R2_.fastq").name ## if not merging then the nonmerged reads will come from the normal edits else: nonmerged1 = two_files[0][0] nonmerged2 = two_files[0][1] ## get the maxn and minlen values try: maxn = sum(data.paramsdict['max_low_qual_bases']) except TypeError: maxn = data.paramsdict['max_low_qual_bases'] minlen = str(max(32, data.paramsdict["filter_min_trim_len"])) ## we need to gunzip the files if they are zipped (at least for now) if merge and two_files[0][0].endswith(".gz"): LOGGER.info("gunzipping pairs") tmp1 = os.path.splitext(two_files[0][0])[0]+".tmp1" tmp2 = os.path.splitext(two_files[0][1])[0]+".tmp2" out1 = open(tmp1, 'w') out2 = open(tmp2, 'w') gun1 = sps.Popen(["gunzip", "-c", two_files[0][0]], stderr=sps.STDOUT, stdout=out1, close_fds=True) gun2 = sps.Popen(["gunzip", "-c", two_files[0][1]], stderr=sps.STDOUT, stdout=out2, close_fds=True) _ = gun1.communicate() _ = gun2.communicate() out1.close() out2.close() else: tmp1 = two_files[0][0] tmp2 = two_files[0][1] try: ## If we are actually mergeing and not just joining then do vsearch if merge: ## create tmp files with high quality scores and with R2 oriented cmd = [ipyrad.bins.vsearch, "--fastq_mergepairs", tmp1, "--reverse", tmp2, "--fastqout", merged_out, "--fastqout_notmerged_fwd", nonmerged1, "--fastqout_notmerged_rev", nonmerged2, "--fasta_width", "0", "--fastq_minmergelen", minlen, "--fastq_maxns", str(maxn), "--fastq_minovlen", "20", "--fastq_maxdiffs", "4", "--label_suffix", "_m1", "--fastq_qmax", "1000", "--threads", "2", "--fastq_allowmergestagger"] LOGGER.debug("merge cmd: %s", " ".join(cmd)) proc = sps.Popen(cmd, stderr=sps.STDOUT, stdout=sps.PIPE) try: res = proc.communicate()[0] except KeyboardInterrupt: proc.kill() if proc.returncode: LOGGER.error("Error: %s %s", cmd, res) ## remove temp files rmfiles = [os.path.splitext(two_files[0][0])[0]+".tmp1", os.path.splitext(two_files[0][1])[0]+".tmp2", nonmerged1, nonmerged2] for rmfile in rmfiles: if os.path.exists(rmfile): os.remove(rmfile) raise IPyradWarningExit("Error merge pairs:\n %s\n%s", cmd, res) ## record how many read pairs were merged with 
open(merged_out, 'r') as tmpf: #nmerged = len(tmpf.readlines()) // 4 nmerged = sum(1 for i in tmpf.readlines()) // 4 ## Combine the unmerged pairs and append to the merge file with open(merged_out, 'ab') as combout: ## read in paired end read files 4 lines at a time if nonmerged1.endswith(".gz"): fr1 = gzip.open(nonmerged1, 'rb') else: fr1 = open(nonmerged1, 'rb') quart1 = itertools.izip(*[iter(fr1)]*4) if nonmerged2.endswith(".gz"): fr2 = gzip.open(nonmerged2, 'rb') else: fr2 = open(nonmerged2, 'rb') quart2 = itertools.izip(*[iter(fr2)]*4) quarts = itertools.izip(quart1, quart2) ## a list to store until writing writing = [] counts = 0 ## iterate until done while 1: try: read1s, read2s = quarts.next() except StopIteration: break if revcomp: writing.append("".join([ read1s[0], read1s[1].strip() + "nnnn" + \ comp(read2s[1].strip()[::-1]) + "\n", read1s[2], read1s[3].strip() + "nnnn" + \ read2s[3].strip()[::-1] + "\n", ])) else: writing.append("".join([ read1s[0], read1s[1].strip() + "nnnn" + \ read2s[1], read1s[2], read1s[3].strip() + "nnnn" + \ read2s[3], ])) counts += 1 if not counts % 10: combout.write("".join(writing)) #+"\n") writing = [] if writing: combout.write("".join(writing)) ## close handles fr1.close() fr2.close() combout.close() except Exception as inst: LOGGER.error("Exception in merge_pairs - {}".format(inst)) raise ## No matter what happens please clean up the temp files. finally: ## if merged then delete the nonmerge tmp files if merge: ## remove temp files rmfiles = [nonmerged1, nonmerged2, os.path.splitext(two_files[0][0])[0]+".tmp1", os.path.splitext(two_files[0][1])[0]+".tmp2"] for rmfile in rmfiles: if os.path.exists(rmfile): os.remove(rmfile) return nmerged
[ "def", "merge_pairs", "(", "data", ",", "two_files", ",", "merged_out", ",", "revcomp", ",", "merge", ")", ":", "LOGGER", ".", "debug", "(", "\"Entering merge_pairs()\"", ")", "## Return the number of merged pairs", "nmerged", "=", "-", "1", "## Check input files fr...
Merge PE reads. Takes in a list of unmerged files [r1, r2] and the filehandle to write merged data to, and it returns the number of reads that were merged (overlapping). If merge==0 then only concat pairs (nnnn), no merging in vsearch. Parameters ----------- two_files (tuple): A list or tuple of the [r1, r2] files to be merged. merged_out (str): A string file handle for the merged data to be written to. revcomp (bool): Whether or not to revcomp the R2s. merge (bool): Whether or not to perform vsearch merging. If not then reads are simply concatenated with a 'nnnn' separator. Returns -------- If merge is on then the func will return the number of pairs successfully merged, else it returns -1.
[ "Merge", "PE", "reads", ".", "Takes", "in", "a", "list", "of", "unmerged", "files", "[", "r1", "r2", "]", "and", "the", "filehandle", "to", "write", "merged", "data", "to", "and", "it", "returns", "the", "number", "of", "reads", "that", "were", "merged...
python
valid
36.849246
MartinThoma/mpu
mpu/ml.py
https://github.com/MartinThoma/mpu/blob/61bc36d0192ca90c0bcf9b8a5d7d0d8520e20ff6/mpu/ml.py#L47-L70
def one_hot2indices(one_hots):
    """
    Convert an iterable of one-hot encoded targets to a list of indices.

    Parameters
    ----------
    one_hots : list

    Returns
    -------
    indices : list

    Examples
    --------
    >>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
    [0, 1, 2]

    >>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
    [0, 0, 1]
    """
    indices = []
    for one_hot in one_hots:
        indices.append(argmax(one_hot))
    return indices
[ "def", "one_hot2indices", "(", "one_hots", ")", ":", "indices", "=", "[", "]", "for", "one_hot", "in", "one_hots", ":", "indices", ".", "append", "(", "argmax", "(", "one_hot", ")", ")", "return", "indices" ]
Convert an iterable of one-hot encoded targets to a list of indices.

Parameters
----------
one_hots : list

Returns
-------
indices : list

Examples
--------
>>> one_hot2indices([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
[0, 1, 2]

>>> one_hot2indices([[1, 0], [1, 0], [0, 1]])
[0, 0, 1]
[ "Convert", "an", "iterable", "of", "one", "-", "hot", "encoded", "targets", "to", "a", "list", "of", "indices", "." ]
python
train
19.166667
hubo1016/vlcp
vlcp/utils/http.py
https://github.com/hubo1016/vlcp/blob/239055229ec93a99cc7e15208075724ccf543bd1/vlcp/utils/http.py#L353-L364
async def close(self): """ Close this request, send all data. You can still run other operations in the handler. """ if not self._sendHeaders: self._startResponse() if self.inputstream is not None: self.inputstream.close(self.connection.scheduler) if self.outputstream is not None: await self.flush(True) if hasattr(self, 'session') and self.session: self.session.unlock()
[ "async", "def", "close", "(", "self", ")", ":", "if", "not", "self", ".", "_sendHeaders", ":", "self", ".", "_startResponse", "(", ")", "if", "self", ".", "inputstream", "is", "not", "None", ":", "self", ".", "inputstream", ".", "close", "(", "self", ...
Close this request, send all data. You can still run other operations in the handler.
[ "Close", "this", "request", "send", "all", "data", ".", "You", "can", "still", "run", "other", "operations", "in", "the", "handler", "." ]
python
train
38.75
saltstack/salt
salt/modules/napalm_yang_mod.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/napalm_yang_mod.py#L420-L567
def load_config(data, *models, **kwargs): ''' Generate and load the config on the device using the OpenConfig or IETF models and device profiles. data Dictionary structured with respect to the models referenced. models A list of models to be used when generating the config. profiles: ``None`` Use certain profiles to generate the config. If not specified, will use the platform default profile(s). test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. replace: ``False`` Should replace the config with the new generate one? CLI Example: .. code-block:: bash salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True Output Example: .. code-block:: jinja device1: ---------- already_configured: False comment: diff: [edit interfaces ge-0/0/0] - mtu 1400; [edit interfaces ge-0/0/0 unit 0 family inet] - dhcp; [edit interfaces lo0] - unit 0 { - description lo0.0; - } + unit 1 { + description "new loopback"; + } loaded_config: <configuration> <interfaces replace="replace"> <interface> <name>ge-0/0/0</name> <unit> <name>0</name> <family> <inet/> </family> <description>ge-0/0/0.0</description> </unit> <description>management interface</description> </interface> <interface> <name>ge-0/0/1</name> <disable/> <description>ge-0/0/1</description> </interface> <interface> <name>ae0</name> <unit> <name>0</name> <vlan-id>100</vlan-id> <family> <inet> <address> <name>192.168.100.1/24</name> </address> <address> <name>172.20.100.1/24</name> </address> </inet> </family> <description>a description</description> </unit> <vlan-tagging/> <unit> <name>1</name> <vlan-id>1</vlan-id> <family> <inet> <address> <name>192.168.101.1/24</name> </address> </inet> </family> <disable/> <description>ae0.1</description> </unit> <vlan-tagging/> <unit> <name>2</name> <vlan-id>2</vlan-id> <family> <inet> <address> <name>192.168.102.1/24</name> </address> </inet> </family> <description>ae0.2</description> </unit> <vlan-tagging/> </interface> <interface> <name>lo0</name> <unit> <name>1</name> <description>new loopback</description> </unit> <description>lo0</description> </interface> </interfaces> </configuration> result: True ''' if isinstance(models, tuple) and isinstance(models[0], list): models = models[0] config = get_config(data, *models, **kwargs) test = kwargs.pop('test', False) debug = kwargs.pop('debug', False) commit = kwargs.pop('commit', True) replace = kwargs.pop('replace', False) return __salt__['net.load_config'](text=config, test=test, debug=debug, commit=commit, replace=replace, inherit_napalm_device=napalm_device)
[ "def", "load_config", "(", "data", ",", "*", "models", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "models", ",", "tuple", ")", "and", "isinstance", "(", "models", "[", "0", "]", ",", "list", ")", ":", "models", "=", "models", "[", ...
Generate and load the config on the device using the OpenConfig or IETF models and device profiles. data Dictionary structured with respect to the models referenced. models A list of models to be used when generating the config. profiles: ``None`` Use certain profiles to generate the config. If not specified, will use the platform default profile(s). test: ``False`` Dry run? If set as ``True``, will apply the config, discard and return the changes. Default: ``False`` and will commit the changes on the device. commit: ``True`` Commit? Default: ``True``. debug: ``False`` Debug mode. Will insert a new key under the output dictionary, as ``loaded_config`` containing the raw configuration loaded on the device. replace: ``False`` Should replace the config with the new generate one? CLI Example: .. code-block:: bash salt '*' napalm_yang.load_config {} models.openconfig_interfaces test=True debug=True Output Example: .. code-block:: jinja device1: ---------- already_configured: False comment: diff: [edit interfaces ge-0/0/0] - mtu 1400; [edit interfaces ge-0/0/0 unit 0 family inet] - dhcp; [edit interfaces lo0] - unit 0 { - description lo0.0; - } + unit 1 { + description "new loopback"; + } loaded_config: <configuration> <interfaces replace="replace"> <interface> <name>ge-0/0/0</name> <unit> <name>0</name> <family> <inet/> </family> <description>ge-0/0/0.0</description> </unit> <description>management interface</description> </interface> <interface> <name>ge-0/0/1</name> <disable/> <description>ge-0/0/1</description> </interface> <interface> <name>ae0</name> <unit> <name>0</name> <vlan-id>100</vlan-id> <family> <inet> <address> <name>192.168.100.1/24</name> </address> <address> <name>172.20.100.1/24</name> </address> </inet> </family> <description>a description</description> </unit> <vlan-tagging/> <unit> <name>1</name> <vlan-id>1</vlan-id> <family> <inet> <address> <name>192.168.101.1/24</name> </address> </inet> </family> <disable/> <description>ae0.1</description> </unit> <vlan-tagging/> <unit> <name>2</name> <vlan-id>2</vlan-id> <family> <inet> <address> <name>192.168.102.1/24</name> </address> </inet> </family> <description>ae0.2</description> </unit> <vlan-tagging/> </interface> <interface> <name>lo0</name> <unit> <name>1</name> <description>new loopback</description> </unit> <description>lo0</description> </interface> </interfaces> </configuration> result: True
[ "Generate", "and", "load", "the", "config", "on", "the", "device", "using", "the", "OpenConfig", "or", "IETF", "models", "and", "device", "profiles", "." ]
python
train
35.006757
raiden-network/raiden
raiden/transfer/mediated_transfer/target.py
https://github.com/raiden-network/raiden/blob/407ba15c72074e9de88771d6b9661ff4dc36bef5/raiden/transfer/mediated_transfer/target.py#L215-L238
def handle_onchain_secretreveal( target_state: TargetTransferState, state_change: ContractReceiveSecretReveal, channel_state: NettingChannelState, ) -> TransitionResult[TargetTransferState]: """ Validates and handles a ContractReceiveSecretReveal state change. """ valid_secret = is_valid_secret_reveal( state_change=state_change, transfer_secrethash=target_state.transfer.lock.secrethash, secret=state_change.secret, ) if valid_secret: channel.register_onchain_secret( channel_state=channel_state, secret=state_change.secret, secrethash=state_change.secrethash, secret_reveal_block_number=state_change.block_number, ) target_state.state = TargetTransferState.ONCHAIN_UNLOCK target_state.secret = state_change.secret return TransitionResult(target_state, list())
[ "def", "handle_onchain_secretreveal", "(", "target_state", ":", "TargetTransferState", ",", "state_change", ":", "ContractReceiveSecretReveal", ",", "channel_state", ":", "NettingChannelState", ",", ")", "->", "TransitionResult", "[", "TargetTransferState", "]", ":", "val...
Validates and handles a ContractReceiveSecretReveal state change.
[ "Validates", "and", "handles", "a", "ContractReceiveSecretReveal", "state", "change", "." ]
python
train
37.083333
michaelaye/pyciss
pyciss/downloader.py
https://github.com/michaelaye/pyciss/blob/019256424466060babead7edab86736c881b0831/pyciss/downloader.py#L54-L83
def download_and_calibrate(img_id=None, overwrite=False, recalibrate=False, **kwargs):
    """Download and calibrate one or more image ids, in parallel.

    Parameters
    ----------
    img_id : str or io.PathManager, optional
        If more than one item is in img_id, a parallel process is started
    overwrite: bool, optional
        If the pm.cubepath exists, this switch controls if it is being overwritten.
        Default: False
    """
    if isinstance(img_id, io.PathManager):
        pm = img_id
    else:
        # get a PathManager object that knows where your data is or should be
        logger.debug("Creating PathManager object")
        pm = io.PathManager(img_id)
    if not pm.raw_image.exists() or overwrite is True:
        logger.debug("Downloading file %s" % pm.img_id)
        download_file_id(pm.img_id)
        pm = io.PathManager(img_id)  # refresh, to get proper PDS version id.
    else:
        logger.info("Found existing raw image; skipping download.")
    if not (pm.cubepath.exists() and pm.undestriped.exists()) or overwrite is True:
        calib = pipeline.Calibrator(img_id, **kwargs)
        calib.standard_calib()
    else:
        print("All files exist. Use overwrite=True to redownload and calibrate.")
[ "def", "download_and_calibrate", "(", "img_id", "=", "None", ",", "overwrite", "=", "False", ",", "recalibrate", "=", "False", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "img_id", ",", "io", ".", "PathManager", ")", ":", "pm", "=", "im...
Download and calibrate one or more image ids, in parallel. Parameters ---------- img_id : str or io.PathManager, optional If more than one item is in img_id, a parallel process is started overwrite: bool, optional If the pm.cubepath exists, this switch controls if it is being overwritten. Default: False
[ "Download", "and", "calibrate", "one", "or", "more", "image", "ids", "in", "parallel", "." ]
python
train
39.5
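A hedged call sketch; the image id is a placeholder Cassini ISS id.

    from pyciss.downloader import download_and_calibrate

    # Downloads the raw image if it is missing, then runs the standard
    # calibration; overwrite=True forces both steps to rerun.
    download_and_calibrate("N1467345444", overwrite=False)   # placeholder id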
saxix/sample-data-utils
sample_data_utils/people.py
https://github.com/saxix/sample-data-utils/blob/769f1b46e60def2675a14bd5872047af6d1ea398/sample_data_utils/people.py#L118-L135
def last_name(languages=None):
    """
    return a random last name

    >>> from mock import patch
    >>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
    ...     last_name()
    'Aaa'

    >>> with patch('%s._get_lastnames' % __name__, lambda lang: ['%s_lastname' % lang]):
    ...     last_name(['it'])
    'It_Lastname'
    """
    choices = []
    languages = languages or ['en']
    for lang in languages:
        samples = _get_lastnames(lang)
        choices.extend(samples)
    return random.choice(choices).title()
[ "def", "last_name", "(", "languages", "=", "None", ")", ":", "choices", "=", "[", "]", "languages", "=", "languages", "or", "[", "'en'", "]", "for", "lang", "in", "languages", ":", "samples", "=", "_get_lastnames", "(", "lang", ")", "choices", ".", "ex...
return a random last name

>>> from mock import patch
>>> with patch('%s._get_lastnames' % __name__, lambda *args: ['aaa']):
...     last_name()
'Aaa'

>>> with patch('%s._get_lastnames' % __name__, lambda lang: ['%s_lastname' % lang]):
...     last_name(['it'])
'It_Lastname'
[ "return", "a", "random", "last", "name" ]
python
test
29.555556
iterative/dvc
dvc/progress.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/progress.py#L49-L62
def refresh(self, line=None): """Refreshes progress bar.""" # Just go away if it is locked. Will update next time if not self._lock.acquire(False): return if line is None: line = self._line if sys.stdout.isatty() and line is not None: self._writeln(line) self._line = line self._lock.release()
[ "def", "refresh", "(", "self", ",", "line", "=", "None", ")", ":", "# Just go away if it is locked. Will update next time", "if", "not", "self", ".", "_lock", ".", "acquire", "(", "False", ")", ":", "return", "if", "line", "is", "None", ":", "line", "=", "...
Refreshes progress bar.
[ "Refreshes", "progress", "bar", "." ]
python
train
27.071429
mwickert/scikit-dsp-comm
sk_dsp_comm/synchronization.py
https://github.com/mwickert/scikit-dsp-comm/blob/5c1353412a4d81a8d7da169057564ecf940f8b5b/sk_dsp_comm/synchronization.py#L255-L269
def time_step(z,Ns,t_step,Nstep):
    """
    Create a one sample per symbol signal containing a time step of
    t_step samples starting Nstep symbols into the waveform.

    :param z: complex baseband signal after matched filter
    :param Ns: number of samples per symbol
    :param t_step: in samples relative to Ns
    :param Nstep: symbol sample location where the step turns on
    :return: the one sample per symbol signal containing the time step

    Mark Wickert July 2014
    """
    z_step = np.hstack((z[:Ns*Nstep], z[(Ns*Nstep+t_step):], np.zeros(t_step)))
    return z_step
[ "def", "time_step", "(", "z", ",", "Ns", ",", "t_step", ",", "Nstep", ")", ":", "z_step", "=", "np", ".", "hstack", "(", "(", "z", "[", ":", "Ns", "*", "Nstep", "]", ",", "z", "[", "(", "Ns", "*", "Nstep", "+", "t_step", ")", ":", "]", ",",...
Create a one sample per symbol signal containing a time step of
t_step samples starting Nstep symbols into the waveform.

:param z: complex baseband signal after matched filter
:param Ns: number of samples per symbol
:param t_step: in samples relative to Ns
:param Nstep: symbol sample location where the step turns on
:return: the one sample per symbol signal containing the time step

Mark Wickert July 2014
[ "Create", "a", "one", "sample", "per", "symbol", "signal", "containing", "a", "phase", "rotation", "step", "Nsymb", "into", "the", "waveform", "." ]
python
valid
36.666667
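A small numerical check of the sample slip, using a trivially recognisable input:

    import numpy as np

    z = np.arange(10, dtype=complex)        # stand-in matched-filter output
    z_step = time_step(z, 1, 2, 4)          # Ns=1, t_step=2, Nstep=4
    # Keeps z[:4], skips two samples, zero-pads to preserve length:
    # [0 1 2 3 6 7 8 9 0 0]
    print(z_step.real.astype(int))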
johntruckenbrodt/spatialist
spatialist/vector.py
https://github.com/johntruckenbrodt/spatialist/blob/007f49296a156de8d7168ad235b5a5b8e8d3633d/spatialist/vector.py#L163-L182
def addlayer(self, name, srs, geomType): """ add a layer to the vector layer Parameters ---------- name: str the layer name srs: int, str or :osgeo:class:`osr.SpatialReference` the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options. geomType: int an OGR well-known binary data type. See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_. Returns ------- """ self.vector.CreateLayer(name, srs, geomType) self.init_layer()
[ "def", "addlayer", "(", "self", ",", "name", ",", "srs", ",", "geomType", ")", ":", "self", ".", "vector", ".", "CreateLayer", "(", "name", ",", "srs", ",", "geomType", ")", "self", ".", "init_layer", "(", ")" ]
add a layer to the vector object

Parameters
----------
name: str
    the layer name
srs: int, str or :osgeo:class:`osr.SpatialReference`
    the spatial reference system. See :func:`spatialist.auxil.crsConvert` for options.
geomType: int
    an OGR well-known binary data type.
    See `Module ogr <https://gdal.org/python/osgeo.ogr-module.html>`_.

Returns
-------
[ "add", "a", "layer", "to", "the", "vector", "object" ]
python
train
29.35
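A call sketch for the record above; `vec` stands for a hypothetical open spatialist Vector instance:

    from osgeo import ogr, osr
    srs = osr.SpatialReference()
    srs.ImportFromEPSG(4326)                  # WGS84
    vec.addlayer('sites', srs, ogr.wkbPoint)  # new point layer named 'sites'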
limodou/uliweb
uliweb/contrib/generic/__init__.py
https://github.com/limodou/uliweb/blob/34472f25e4bc0b954a35346672f94e84ef18b076/uliweb/contrib/generic/__init__.py#L88-L112
def _list_view(self, model, **kwargs): """ :param model: :param fields_convert_map: it's different from ListView :param kwargs: :return: """ from uliweb import request #add download fields process fields = kwargs.pop('fields', None) meta = kwargs.pop('meta', 'Table') if 'download' in request.GET: if 'download_fields' in kwargs: fields = kwargs.pop('download_fields', fields) if 'download_meta' in kwargs: meta = kwargs.pop('download_meta') else: if hasattr(model, 'Download'): meta = 'Download' else: meta = meta view = functions.ListView(model, fields=fields, meta=meta, **kwargs) return view
[ "def", "_list_view", "(", "self", ",", "model", ",", "*", "*", "kwargs", ")", ":", "from", "uliweb", "import", "request", "#add download fields process", "fields", "=", "kwargs", ".", "pop", "(", "'fields'", ",", "None", ")", "meta", "=", "kwargs", ".", ...
:param model: :param fields_convert_map: it's different from ListView :param kwargs: :return:
[ ":", "param", "model", ":", ":", "param", "fields_convert_map", ":", "it", "s", "different", "from", "ListView", ":", "param", "kwargs", ":", ":", "return", ":" ]
python
train
32.68
manodeep/Corrfunc
mocks/python_bindings/call_correlation_functions_mocks.py
https://github.com/manodeep/Corrfunc/blob/753aa50b93eebfefc76a0b0cd61522536bd45d2a/mocks/python_bindings/call_correlation_functions_mocks.py#L28-L41
def read_text_file(filename, encoding="utf-8"):
    """
    Reads a file under python3 with encoding (default UTF-8).
    Also works under python2, without encoding.
    Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp)
    principle.
    """
    try:
        # keyword form: python3 honours the encoding, while python2's open()
        # raises TypeError and falls through to the plain call below
        with open(filename, 'r', encoding=encoding) as f:
            r = f.read()
    except TypeError:
        with open(filename, 'r') as f:
            r = f.read()
    return r
[ "def", "read_text_file", "(", "filename", ",", "encoding", "=", "\"utf-8\"", ")", ":", "try", ":", "with", "open", "(", "filename", ",", "'r'", ",", "encoding", "=", "encoding", ")", "as", "f", ":", "r", "=", "f", ".", "read", "(", ")", "except", "TypeError", ":", ...
Reads a file under python3 with encoding (default UTF-8). Also works under python2, without encoding. Uses the EAFP (https://docs.python.org/2/glossary.html#term-eafp) principle.
[ "Reads", "a", "file", "under", "python3", "with", "encoding", "(", "default", "UTF", "-", "8", ")", ".", "Also", "works", "under", "python2", "without", "encoding", ".", "Uses", "the", "EAFP", "(", "https", ":", "//", "docs", ".", "python", ".", "org",...
python
train
30.5
materialsproject/pymatgen
pymatgen/io/cif.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/io/cif.py#L822-L839
def parse_oxi_states(self, data): """ Parse oxidation states from data dictionary """ try: oxi_states = { data["_atom_type_symbol"][i]: str2float(data["_atom_type_oxidation_number"][i]) for i in range(len(data["_atom_type_symbol"]))} # attempt to strip oxidation state from _atom_type_symbol # in case the label does not contain an oxidation state for i, symbol in enumerate(data["_atom_type_symbol"]): oxi_states[re.sub(r"\d?[\+,\-]?$", "", symbol)] = \ str2float(data["_atom_type_oxidation_number"][i]) except (ValueError, KeyError): oxi_states = None return oxi_states
[ "def", "parse_oxi_states", "(", "self", ",", "data", ")", ":", "try", ":", "oxi_states", "=", "{", "data", "[", "\"_atom_type_symbol\"", "]", "[", "i", "]", ":", "str2float", "(", "data", "[", "\"_atom_type_oxidation_number\"", "]", "[", "i", "]", ")", "...
Parse oxidation states from data dictionary
[ "Parse", "oxidation", "states", "from", "data", "dictionary" ]
python
train
41.722222
mozilla-releng/mozilla-version
mozilla_version/parser.py
https://github.com/mozilla-releng/mozilla-version/blob/e5400f31f7001bd48fb6e17626905147dd4c17d7/mozilla_version/parser.py#L26-L33
def positive_int(val): """Parse `val` into a positive integer.""" if isinstance(val, float): raise ValueError('"{}" must not be a float'.format(val)) val = int(val) if val >= 0: return val raise ValueError('"{}" must be positive'.format(val))
[ "def", "positive_int", "(", "val", ")", ":", "if", "isinstance", "(", "val", ",", "float", ")", ":", "raise", "ValueError", "(", "'\"{}\" must not be a float'", ".", "format", "(", "val", ")", ")", "val", "=", "int", "(", "val", ")", "if", "val", ">=",...
Parse `val` into a positive integer.
[ "Parse", "val", "into", "a", "positive", "integer", "." ]
python
train
33.875
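A behavior sketch for the record above; note that the guard `val >= 0` also accepts zero:

    positive_int('3')   # -> 3 (int() parses numeric strings)
    positive_int(0)     # -> 0
    positive_int(3.5)   # raises ValueError: floats are rejected up front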
reanahub/reana-commons
reana_commons/utils.py
https://github.com/reanahub/reana-commons/blob/abf31d9f495e0d93171c43fc4a414cd292091b11/reana_commons/utils.py#L88-L95
def calculate_file_access_time(workflow_workspace): """Calculate access times of files in workspace.""" access_times = {} for subdir, dirs, files in os.walk(workflow_workspace): for file in files: file_path = os.path.join(subdir, file) access_times[file_path] = os.stat(file_path).st_atime return access_times
[ "def", "calculate_file_access_time", "(", "workflow_workspace", ")", ":", "access_times", "=", "{", "}", "for", "subdir", ",", "dirs", ",", "files", "in", "os", ".", "walk", "(", "workflow_workspace", ")", ":", "for", "file", "in", "files", ":", "file_path",...
Calculate access times of files in workspace.
[ "Calculate", "access", "times", "of", "files", "in", "workspace", "." ]
python
train
43.75
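A usage sketch, assuming the function above is in scope:

    import os
    import tempfile
    with tempfile.TemporaryDirectory() as workspace:
        open(os.path.join(workspace, 'data.txt'), 'w').close()
        atimes = calculate_file_access_time(workspace)  # {absolute path: st_atime}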
ibis-project/ibis
ibis/config.py
https://github.com/ibis-project/ibis/blob/1e39a5fd9ef088b45c155e8a5f541767ee8ef2e7/ibis/config.py#L599-L631
def pp_options_list(keys, width=80, _print=False): """ Builds a concise listing of available options, grouped by prefix """ from textwrap import wrap from itertools import groupby def pp(name, ks): pfx = '- ' + name + '.[' if name else '' ls = wrap( ', '.join(ks), width, initial_indent=pfx, subsequent_indent=' ', break_long_words=False, ) if ls and ls[-1] and name: ls[-1] = ls[-1] + ']' return ls ls = [] singles = [x for x in sorted(keys) if x.find('.') < 0] if singles: ls += pp('', singles) keys = [x for x in keys if x.find('.') >= 0] for k, g in groupby(sorted(keys), lambda x: x[: x.rfind('.')]): ks = [x[len(k) + 1 :] for x in list(g)] ls += pp(k, ks) s = '\n'.join(ls) if _print: print(s) else: return s
[ "def", "pp_options_list", "(", "keys", ",", "width", "=", "80", ",", "_print", "=", "False", ")", ":", "from", "textwrap", "import", "wrap", "from", "itertools", "import", "groupby", "def", "pp", "(", "name", ",", "ks", ")", ":", "pfx", "=", "'- '", ...
Builds a concise listing of available options, grouped by prefix
[ "Builds", "a", "concise", "listing", "of", "available", "options", "grouped", "by", "prefix" ]
python
train
26.878788
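A sketch of the grouping the record above produces (exact line breaks depend on `width`):

    keys = ['display.width', 'display.precision', 'mode']
    pp_options_list(keys, width=40)
    # -> 'mode\n- display.[precision, width]'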
jwass/geog
geog/geog.py
https://github.com/jwass/geog/blob/52ceb9b543454b31c63694ee459aad9cd52f011a/geog/geog.py#L129-L184
def propagate(p0, angle, d, deg=True, bearing=False, r=r_earth_mean): """ Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km. """ single, (p0, angle, d) = _to_arrays((p0, 2), (angle, 1), (d, 1)) if deg: p0 = np.radians(p0) angle = np.radians(angle) if not bearing: angle = np.pi / 2.0 - angle lon0, lat0 = p0[:,0], p0[:,1] angd = d / r lat1 = arcsin(sin(lat0) * cos(angd) + cos(lat0) * sin(angd) * cos(angle)) a = sin(angle) * sin(angd) * cos(lat0) b = cos(angd) - sin(lat0) * sin(lat1) lon1 = lon0 + arctan2(a, b) p1 = np.column_stack([lon1, lat1]) if deg: p1 = np.degrees(p1) if single: p1 = p1[0] return p1
[ "def", "propagate", "(", "p0", ",", "angle", ",", "d", ",", "deg", "=", "True", ",", "bearing", "=", "False", ",", "r", "=", "r_earth_mean", ")", ":", "single", ",", "(", "p0", ",", "angle", ",", "d", ")", "=", "_to_arrays", "(", "(", "p0", ","...
Given an initial point and angle, move distance d along the surface Parameters ---------- p0 : point-like (or array of point-like) [lon, lat] objects angle : float (or array of float) bearing. Note that by default, 0 degrees is due East increasing clockwise so that 90 degrees is due North. See the bearing flag to change the meaning of this angle d : float (or array of float) distance to move. The units of d should be consistent with input r deg : bool, optional (default True) Whether both p0 and angle are specified in degrees. The output points will also match the value of this flag. bearing : bool, optional (default False) Indicates whether to interpret the input angle as the classical definition of bearing. r : float, optional (default r_earth_mean) radius of the sphere Reference --------- http://www.movable-type.co.uk/scripts/latlong.html - Destination Note: Spherical earth model. By default uses radius of 6371.0 km.
[ "Given", "an", "initial", "point", "and", "angle", "move", "distance", "d", "along", "the", "surface" ]
python
train
29.892857
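A sketch for the record above: with `bearing=True`, angle 0 is due North, so stepping ~111.19 km from the origin lands close to one degree of latitude (distance in km to match the default `r_earth_mean`):

    p1 = propagate([0.0, 0.0], 0.0, 111.19, bearing=True)
    # p1 is approximately [0.0, 1.0] (lon, lat in degrees)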
pybel/pybel
src/pybel/struct/graph.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/struct/graph.py#L251-L253
def defined_namespace_keywords(self) -> Set[str]: # noqa: D401 """The set of all keywords defined as namespaces in this graph.""" return set(self.namespace_pattern) | set(self.namespace_url)
[ "def", "defined_namespace_keywords", "(", "self", ")", "->", "Set", "[", "str", "]", ":", "# noqa: D401", "return", "set", "(", "self", ".", "namespace_pattern", ")", "|", "set", "(", "self", ".", "namespace_url", ")" ]
The set of all keywords defined as namespaces in this graph.
[ "The", "set", "of", "all", "keywords", "defined", "as", "namespaces", "in", "this", "graph", "." ]
python
train
68.333333
nerox8664/pytorch2keras
pytorch2keras/operation_layers.py
https://github.com/nerox8664/pytorch2keras/blob/750eaf747323580e6732d0c5ba9f2f39cb096764/pytorch2keras/operation_layers.py#L10-L32
def convert_sum( params, w_name, scope_name, inputs, layers, weights, names ): """ Convert sum. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers """ print('Converting Sum ...') def target_layer(x): import keras.backend as K return K.sum(x) lambda_layer = keras.layers.Lambda(target_layer) layers[scope_name] = lambda_layer(layers[inputs[0]])
[ "def", "convert_sum", "(", "params", ",", "w_name", ",", "scope_name", ",", "inputs", ",", "layers", ",", "weights", ",", "names", ")", ":", "print", "(", "'Converting Sum ...'", ")", "def", "target_layer", "(", "x", ")", ":", "import", "keras", ".", "ba...
Convert sum. Args: params: dictionary with layer parameters w_name: name prefix in state_dict scope_name: pytorch scope name inputs: pytorch node inputs layers: dictionary with keras tensors weights: pytorch state_dict names: use short names for keras layers
[ "Convert", "sum", "." ]
python
valid
27.26087
tensorflow/cleverhans
examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py
https://github.com/tensorflow/cleverhans/blob/97488e215760547b81afc53f5e5de8ba7da5bd98/examples/nips17_adversarial_competition/dev_toolkit/sample_attacks/fgsm/attack_fgsm.py#L47-L77
def load_images(input_dir, batch_shape):
    """Read png images from input directory in batches.

    Args:
        input_dir: input directory
        batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

    Yields:
        filenames: list file names without path of each image
            Length of this list could be less than batch_size, in this case only
            first few images of the result are elements of the minibatch.
        images: array with all images from this batch
    """
    images = np.zeros(batch_shape)
    filenames = []
    idx = 0
    batch_size = batch_shape[0]
    for filepath in tf.gfile.Glob(os.path.join(input_dir, '*.png')):
        with tf.gfile.Open(filepath) as f:
            image = np.array(Image.open(f).convert('RGB')).astype(np.float) / 255.0
        # Images for inception classifier are normalized to be in [-1, 1] interval.
        images[idx, :, :, :] = image * 2.0 - 1.0
        filenames.append(os.path.basename(filepath))
        idx += 1
        if idx == batch_size:
            yield filenames, images
            filenames = []
            images = np.zeros(batch_shape)
            idx = 0
    if idx > 0:
        yield filenames, images
[ "def", "load_images", "(", "input_dir", ",", "batch_shape", ")", ":", "images", "=", "np", ".", "zeros", "(", "batch_shape", ")", "filenames", "=", "[", "]", "idx", "=", "0", "batch_size", "=", "batch_shape", "[", "0", "]", "for", "filepath", "in", "tf...
Read png images from input directory in batches.

Args:
    input_dir: input directory
    batch_shape: shape of minibatch array, i.e. [batch_size, height, width, 3]

Yields:
    filenames: list file names without path of each image
        Length of this list could be less than batch_size, in this case only
        first few images of the result are elements of the minibatch.
    images: array with all images from this batch
[ "Read", "png", "images", "from", "input", "directory", "in", "batches", "." ]
python
train
34.903226
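A usage sketch; the input directory is hypothetical and must contain PNGs matching `batch_shape`:

    batch_shape = [16, 299, 299, 3]
    for filenames, images in load_images('/tmp/adv_inputs', batch_shape):
        pass  # `images` are already scaled to [-1, 1] for the Inception classifier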
wtsi-hgi/python-hgijson
hgijson/json_converters/builders.py
https://github.com/wtsi-hgi/python-hgijson/blob/6e8ccb562eabcaa816a136268a16504c2e0d4664/hgijson/json_converters/builders.py#L25-L48
def _get_all_property_mappings(encoder: MappingJSONEncoder, property_mappings: Iterable[JsonPropertyMapping],
                               superclasses: Tuple[PropertyMapper]) -> List[JsonPropertyMapping]:
    """
    Gets all of the property mappings from the given property mapper, considering the property mappings for self and
    the property mappings defined by the superclass.
    :param encoder: `self` when bound as a class method
    :param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
    :param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
    effects of property mappers defined by superclasses closer to the start of the list
    :return: all of the property mappings for the given encoder
    """
    mappings = []
    for superclass in superclasses:
        super_mappings = superclass._get_property_mappings(superclass)
        mappings.extend(super_mappings)

    # Add property mappings of own to end of the mappings list
    mappings.extend(property_mappings)

    # Note: It is very difficult to cull all property mappers that target the same properties, leaving only the ones
    # from the lowest class in the hierarchy. This is because such mappers may be encoded as functions. Given that such
    # overloading is unlikely to be used much and the cost of doing a mapping and then mapping again over the top of it
    # will likely be small, there will be no attempt of such a cull.
    return mappings
[ "def", "_get_all_property_mappings", "(", "encoder", ":", "MappingJSONEncoder", ",", "property_mappings", ":", "Iterable", "[", "JsonPropertyMapping", "]", ",", "superclasses", ":", "Tuple", "[", "PropertyMapper", "]", ")", "->", "List", "[", "JsonPropertyMapping", ...
Gets all of the property mappings from the given property mapper, considering the property mappings for self and
the property mappings defined by the superclass.
:param encoder: `self` when bound as a class method
:param property_mappings: mappings defined for the given encoder, excluding mappings defined by superclasses
:param superclasses: superclasses of the given encoder. Property mappers in later superclasses may override the
effects of property mappers defined by superclasses closer to the start of the list
:return: all of the property mappings for the given encoder
[ "Gets", "all", "of", "the", "property", "mappings", "from", "the", "given", "property", "mapper", "considering", "the", "property", "mappings", "for", "self", "and", "the", "property", "mappings", "defined", "by", "the", "superclass", ".", ":", "param", "encod...
python
train
63.375
mitsei/dlkit
dlkit/json_/repository/objects.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/repository/objects.py#L1177-L1195
def set_published_date(self, published_date): """Sets the published date. arg: published_date (osid.calendaring.DateTime): the new published date raise: InvalidArgument - ``published_date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``published_date`` is ``null`` *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template if self.get_published_date_metadata().is_read_only(): raise errors.NoAccess() if not self._is_valid_date_time( published_date, self.get_published_date_metadata()): raise errors.InvalidArgument() self._my_map['publishedDate'] = published_date
[ "def", "set_published_date", "(", "self", ",", "published_date", ")", ":", "# Implemented from template for osid.assessment.AssessmentOfferedForm.set_start_time_template", "if", "self", ".", "get_published_date_metadata", "(", ")", ".", "is_read_only", "(", ")", ":", "raise",...
Sets the published date. arg: published_date (osid.calendaring.DateTime): the new published date raise: InvalidArgument - ``published_date`` is invalid raise: NoAccess - ``Metadata.isReadOnly()`` is ``true`` raise: NullArgument - ``published_date`` is ``null`` *compliance: mandatory -- This method must be implemented.*
[ "Sets", "the", "published", "date", "." ]
python
train
45.210526
log2timeline/plaso
plaso/lib/lexer.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/lib/lexer.py#L72-L120
def NextToken(self): """Fetch the next token by trying to match any of the regexes in order.""" current_state = self.state for token in self.tokens: # Does the rule apply to us? if not token.state_regex.match(current_state): continue # Try to match the rule m = token.regex.match(self.buffer) if not m: continue # The match consumes the data off the buffer (the handler can put it back # if it likes) # TODO: using joins might be more efficient here. self.processed_buffer += self.buffer[:m.end()] self.buffer = self.buffer[m.end():] self.processed += m.end() next_state = token.next_state for action in token.actions: # Is there a callback to handle this action? callback = getattr(self, action, self.Default) # Allow a callback to skip other callbacks. try: possible_next_state = callback(string=m.group(0), match=m) if possible_next_state == self._CONTINUE_STATE: continue # Override the state from the Token elif possible_next_state: next_state = possible_next_state except errors.ParseError as exception: self.Error(exception) # Update the next state if next_state: self.state = next_state return token # Check that we are making progress - if we are too full, we assume we are # stuck. self.Error('Expected {0:s}'.format(self.state)) self.processed_buffer += self.buffer[:1] self.buffer = self.buffer[1:] return self._ERROR_TOKEN
[ "def", "NextToken", "(", "self", ")", ":", "current_state", "=", "self", ".", "state", "for", "token", "in", "self", ".", "tokens", ":", "# Does the rule apply to us?", "if", "not", "token", ".", "state_regex", ".", "match", "(", "current_state", ")", ":", ...
Fetch the next token by trying to match any of the regexes in order.
[ "Fetch", "the", "next", "token", "by", "trying", "to", "match", "any", "of", "the", "regexes", "in", "order", "." ]
python
train
31.938776
yvesalexandre/bandicoot
bandicoot/individual.py
https://github.com/yvesalexandre/bandicoot/blob/73a658f6f17331541cf0b1547028db9b70e8d58a/bandicoot/individual.py#L250-L286
def response_delay_text(records):
    """
    The response delay of the user within a conversation (in seconds)

    The following sequence of messages defines conversations (``I`` for an
    incoming text, ``O`` for an outgoing text, ``-`` for a one minute
    delay): ::

        I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
        O--O---I--O, we have a 1200 seconds response delay
        I--II---I-I, we don't have a response delay. The user hasn't answered

    For this user, the distribution of response delays will be ``[60, 240, 60]``

    Notes
    -----
    See :ref:`Using bandicoot <conversations-label>` for a definition of conversations.
    Conversations are defined to be a series of text messages each sent no more than an hour
    after the previous. The response delay can thus not be greater than one hour.
    """
    interactions = defaultdict(list)
    for r in records:
        interactions[r.correspondent_id].append(r)

    def _response_delay(grouped):
        ts = ((b.datetime - a.datetime).total_seconds()
              for conv in _conversations(grouped)
              for a, b in pairwise(conv)
              if b.direction == 'out' and a.direction == 'in')
        return ts

    delays = [r for i in interactions.values() for r in _response_delay(i) if r > 0]

    return summary_stats(delays)
[ "def", "response_delay_text", "(", "records", ")", ":", "interactions", "=", "defaultdict", "(", "list", ")", "for", "r", "in", "records", ":", "interactions", "[", "r", ".", "correspondent_id", "]", ".", "append", "(", "r", ")", "def", "_response_delay", ...
The response delay of the user within a conversation (in seconds)

The following sequence of messages defines conversations (``I`` for an
incoming text, ``O`` for an outgoing text, ``-`` for a one minute
delay): ::

    I-O--I----O, we have a 60 seconds response delay and a 240 seconds response delay
    O--O---I--O, we have a 1200 seconds response delay
    I--II---I-I, we don't have a response delay. The user hasn't answered

For this user, the distribution of response delays will be ``[60, 240, 60]``

Notes
-----
See :ref:`Using bandicoot <conversations-label>` for a definition of conversations.
Conversations are defined to be a series of text messages each sent no more than an hour
after the previous. The response delay can thus not be greater than one hour.
[ "The", "response", "delay", "of", "the", "user", "within", "a", "conversation", "(", "in", "seconds", ")" ]
python
train
36.459459
twisted/txaws
txaws/server/schema.py
https://github.com/twisted/txaws/blob/5c3317376cd47e536625027e38c3b37840175ce0/txaws/server/schema.py#L646-L678
def _convert_flat_to_nest(self, params): """ Convert a structure in the form of:: {'foo.1.bar': 'value', 'foo.2.baz': 'value'} to:: {'foo': {'1': {'bar': 'value'}, '2': {'baz': 'value'}}} This is intended for use both during parsing of HTTP arguments like 'foo.1.bar=value' and when dealing with schema declarations that look like 'foo.n.bar'. This is the inverse of L{_convert_nest_to_flat}. """ result = {} for k, v in params.iteritems(): last = result segments = k.split('.') for index, item in enumerate(segments): if index == len(segments) - 1: newd = v else: newd = {} if not isinstance(last, dict): raise InconsistentParameterError(k) if type(last.get(item)) is dict and type(newd) is not dict: raise InconsistentParameterError(k) last = last.setdefault(item, newd) return result
[ "def", "_convert_flat_to_nest", "(", "self", ",", "params", ")", ":", "result", "=", "{", "}", "for", "k", ",", "v", "in", "params", ".", "iteritems", "(", ")", ":", "last", "=", "result", "segments", "=", "k", ".", "split", "(", "'.'", ")", "for",...
Convert a structure in the form of:: {'foo.1.bar': 'value', 'foo.2.baz': 'value'} to:: {'foo': {'1': {'bar': 'value'}, '2': {'baz': 'value'}}} This is intended for use both during parsing of HTTP arguments like 'foo.1.bar=value' and when dealing with schema declarations that look like 'foo.n.bar'. This is the inverse of L{_convert_nest_to_flat}.
[ "Convert", "a", "structure", "in", "the", "form", "of", "::" ]
python
train
33.363636
alphatwirl/alphatwirl
alphatwirl/concurrently/CommunicationChannel.py
https://github.com/alphatwirl/alphatwirl/blob/5138eeba6cd8a334ba52d6c2c022b33c61e3ba38/alphatwirl/concurrently/CommunicationChannel.py#L152-L187
def put_multiple(self, task_args_kwargs_list): """put a list of tasks and their arguments This method can be used to put multiple tasks at once. Calling this method once with multiple tasks can be much faster than calling `put()` multiple times. Parameters ---------- task_args_kwargs_list : list A list of lists with three items that can be parameters of `put()`, i.e., `task`, `args`, `kwargs`. Returns ------- list A list of task IDs. """ if not self.isopen: logger = logging.getLogger(__name__) logger.warning('the drop box is not open') return packages = [ ] for t in task_args_kwargs_list: try: task = t['task'] args = t.get('args', ()) kwargs = t.get('kwargs', {}) package = TaskPackage(task=task, args=args, kwargs=kwargs) except TypeError: package = TaskPackage(task=t, args=(), kwargs={}) packages.append(package) return self.dropbox.put_multiple(packages)
[ "def", "put_multiple", "(", "self", ",", "task_args_kwargs_list", ")", ":", "if", "not", "self", ".", "isopen", ":", "logger", "=", "logging", ".", "getLogger", "(", "__name__", ")", "logger", ".", "warning", "(", "'the drop box is not open'", ")", "return", ...
put a list of tasks and their arguments This method can be used to put multiple tasks at once. Calling this method once with multiple tasks can be much faster than calling `put()` multiple times. Parameters ---------- task_args_kwargs_list : list A list of lists with three items that can be parameters of `put()`, i.e., `task`, `args`, `kwargs`. Returns ------- list A list of task IDs.
[ "put", "a", "list", "of", "tasks", "and", "their", "arguments" ]
python
valid
31.805556
saltstack/salt
salt/modules/win_pkg.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/win_pkg.py#L766-L832
def _refresh_db_conditional(saltenv, **kwargs):
    '''
    Internal use only in this module; has a different set of defaults and
    returns True or False. It supports checking the age of the existing
    generated metadata db, as well as ensuring the metadata db exists to
    begin with

    Args:
        saltenv (str): Salt environment

    Kwargs:

        force (bool):
            Force a refresh if the minimum age has been reached. Default is
            False.

        failhard (bool):
            If ``True``, an error will be raised if any repo SLS files failed to
            process.

    Returns:
        bool: True if fetched or the cache is up to date, False to indicate an issue

    :codeauthor: Damon Atkins <https://github.com/damon-atkins>
    '''
    force = salt.utils.data.is_true(kwargs.pop('force', False))
    failhard = salt.utils.data.is_true(kwargs.pop('failhard', False))
    expired_max = __opts__['winrepo_cache_expire_max']
    expired_min = __opts__['winrepo_cache_expire_min']

    repo_details = _get_repo_details(saltenv)

    # Skip force if age less than minimum age
    if force and expired_min > 0 and repo_details.winrepo_age < expired_min:
        log.info(
            'Refresh skipped, age of winrepo metadata in seconds (%s) is less '
            'than winrepo_cache_expire_min (%s)',
            repo_details.winrepo_age, expired_min
        )
        force = False

    # winrepo_age is -1 if repo db does not exist
    refresh = True if force \
        or repo_details.winrepo_age == -1 \
        or repo_details.winrepo_age > expired_max \
        else False

    if not refresh:
        log.debug(
            'Using existing pkg metadata db for saltenv \'%s\' (age is %s)',
            saltenv, datetime.timedelta(seconds=repo_details.winrepo_age)
        )
        return True

    if repo_details.winrepo_age == -1:
        # no repo meta db
        log.debug(
            'No winrepo.p cache file for saltenv \'%s\', creating one now',
            saltenv
        )

    results = refresh_db(saltenv=saltenv, verbose=False, failhard=failhard)
    try:
        # Return True if there were no failed winrepo SLS files, and False if
        # failures were reported.
        return not bool(results.get('failed', 0))
    except AttributeError:
        return False
[ "def", "_refresh_db_conditional", "(", "saltenv", ",", "*", "*", "kwargs", ")", ":", "force", "=", "salt", ".", "utils", ".", "data", ".", "is_true", "(", "kwargs", ".", "pop", "(", "'force'", ",", "False", ")", ")", "failhard", "=", "salt", ".", "ut...
Internal use only in this module; has a different set of defaults and
returns True or False. It supports checking the age of the existing
generated metadata db, as well as ensuring the metadata db exists to
begin with

Args:
    saltenv (str): Salt environment

Kwargs:

    force (bool):
        Force a refresh if the minimum age has been reached. Default is
        False.

    failhard (bool):
        If ``True``, an error will be raised if any repo SLS files failed to
        process.

Returns:
    bool: True if fetched or the cache is up to date, False to indicate an issue

:codeauthor: Damon Atkins <https://github.com/damon-atkins>
[ "Internal", "use", "only", "in", "this", "module", "has", "a", "different", "set", "of", "defaults", "and", "returns", "True", "or", "False", ".", "And", "supports", "checking", "the", "age", "of", "the", "existing", "generated", "metadata", "db", "as", "w...
python
train
33.328358
enkore/i3pystatus
i3pystatus/cpu_usage.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/cpu_usage.py#L116-L134
def get_usage(self):
    """
    parses /proc/stat and calculates total and busy time
    (more specifically USER_HZ; see man 5 proc for further information)
    """
    usage = {}

    for cpu, timings in self.get_cpu_timings().items():
        cpu_total = sum(timings)
        del timings[3:5]
        cpu_busy = sum(timings)
        cpu_usage = self.calculate_usage(cpu, cpu_total, cpu_busy)

        usage['usage_' + cpu] = cpu_usage

    # for backward compatibility
    usage['usage'] = usage['usage_cpu']

    return usage
[ "def", "get_usage", "(", "self", ")", ":", "usage", "=", "{", "}", "for", "cpu", ",", "timings", "in", "self", ".", "get_cpu_timings", "(", ")", ".", "items", "(", ")", ":", "cpu_total", "=", "sum", "(", "timings", ")", "del", "timings", "[", "3", ...
parses /proc/stat and calculates total and busy time
(more specifically USER_HZ; see man 5 proc for further information)
[ "parses", "/", "proc", "/", "stat", "and", "calcualtes", "total", "and", "busy", "time", "(", "more", "specific", "USER_HZ", "see", "man", "5", "proc", "for", "further", "informations", ")" ]
python
train
29.684211
quantmind/pulsar
pulsar/apps/data/redis/client.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/data/redis/client.py#L434-L498
def sort(self, key, start=None, num=None, by=None, get=None, desc=False,
         alpha=False, store=None, groups=False):
    '''Sort and return the list, set or sorted set at ``key``.

    ``start`` and ``num`` allow for paging through the sorted data

    ``by`` allows using an external key to weight and sort the items.
        Use an "*" to indicate where in the key the item value is located

    ``get`` allows for returning items from external keys rather than the
        sorted data itself.  Use an "*" to indicate where in the key
        the item value is located

    ``desc`` allows for reversing the sort

    ``alpha`` allows for sorting lexicographically rather than numerically

    ``store`` allows for storing the result of the sort into the key ``store``

    ``groups`` if set to True and if ``get`` contains at least two
        elements, sort will return a list of tuples, each containing the
        values fetched from the arguments to ``get``.
    '''
    if ((start is not None and num is None) or
            (num is not None and start is None)):
        raise CommandError("``start`` and ``num`` must both be specified")

    pieces = [key]
    if by is not None:
        pieces.append('BY')
        pieces.append(by)
    if start is not None and num is not None:
        pieces.append('LIMIT')
        pieces.append(start)
        pieces.append(num)
    if get is not None:
        # If get is a string assume we want to get a single value.
        # Otherwise assume it's an iterable and we want to get multiple
        # values. We can't just iterate blindly because strings are
        # iterable.
        if isinstance(get, str):
            pieces.append('GET')
            pieces.append(get)
        else:
            for g in get:
                pieces.append('GET')
                pieces.append(g)
    if desc:
        pieces.append('DESC')
    if alpha:
        pieces.append('ALPHA')
    if store is not None:
        pieces.append('STORE')
        pieces.append(store)
    if groups:
        if not get or isinstance(get, str) or len(get) < 2:
            raise CommandError('when using "groups" the "get" argument '
                               'must be specified and contain at least '
                               'two keys')

    options = {'groups': len(get) if groups else None}
    return self.execute_command('SORT', *pieces, **options)
[ "def", "sort", "(", "self", ",", "key", ",", "start", "=", "None", ",", "num", "=", "None", ",", "by", "=", "None", ",", "get", "=", "None", ",", "desc", "=", "False", ",", "alpha", "=", "False", ",", "store", "=", "None", ",", "groups", "=", ...
Sort and return the list, set or sorted set at ``key``.

``start`` and ``num`` allow for paging through the sorted data

``by`` allows using an external key to weight and sort the items.
    Use an "*" to indicate where in the key the item value is located

``get`` allows for returning items from external keys rather than the
    sorted data itself.  Use an "*" to indicate where in the key
    the item value is located

``desc`` allows for reversing the sort

``alpha`` allows for sorting lexicographically rather than numerically

``store`` allows for storing the result of the sort into the key ``store``

``groups`` if set to True and if ``get`` contains at least two
    elements, sort will return a list of tuples, each containing the
    values fetched from the arguments to ``get``.
[ "Sort", "and", "return", "the", "list", "set", "or", "sorted", "set", "at", "key", "." ]
python
train
39.246154
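A call sketch for the record above; `client` is a hypothetical connected redis client exposing this method:

    client.sort('scores', start=0, num=10, by='weight_*',
                get=['#', 'data_*'], desc=True, groups=True)
    # -> list of (member, data) tuples for the top ten by external weight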
blockstack-packages/keychain-manager-py
keychain/utils.py
https://github.com/blockstack-packages/keychain-manager-py/blob/c15c4ed8f3ed155f71ccac7c13ee08f081d38c06/keychain/utils.py#L28-L38
def bip32_serialize(rawtuple):
    """ Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools)
    by Vitalik Buterin
    """
    vbytes, depth, fingerprint, i, chaincode, key = rawtuple
    i = encode(i, 256, 4)
    chaincode = encode(hash_to_int(chaincode), 256, 32)
    keydata = b'\x00' + key[:-1] if vbytes in PRIVATE else key
    bindata = vbytes + from_int_to_byte(depth % 256) + fingerprint + i + chaincode + keydata
    return changebase(bindata + bin_dbl_sha256(bindata)[:4], 256, 58)
[ "def", "bip32_serialize", "(", "rawtuple", ")", ":", "vbytes", ",", "depth", ",", "fingerprint", ",", "i", ",", "chaincode", ",", "key", "=", "rawtuple", "i", "=", "encode", "(", "i", ",", "256", ",", "4", ")", "chaincode", "=", "encode", "(", "hash_...
Derived from code from pybitcointools (https://github.com/vbuterin/pybitcointools) by Vitalik Buterin
[ "Derived", "from", "code", "from", "pybitcointools", "(", "https", ":", "//", "github", ".", "com", "/", "vbuterin", "/", "pybitcointools", ")", "by", "Vitalik", "Buterin" ]
python
test
46.818182
gwww/elkm1
elkm1_lib/lights.py
https://github.com/gwww/elkm1/blob/078d0de30840c3fab46f1f8534d98df557931e91/elkm1_lib/lights.py#L14-L21
def level(self, level, time=0): """(Helper) Set light to specified level""" if level <= 0: self._elk.send(pf_encode(self._index)) elif level >= 98: self._elk.send(pn_encode(self._index)) else: self._elk.send(pc_encode(self._index, 9, level, time))
[ "def", "level", "(", "self", ",", "level", ",", "time", "=", "0", ")", ":", "if", "level", "<=", "0", ":", "self", ".", "_elk", ".", "send", "(", "pf_encode", "(", "self", ".", "_index", ")", ")", "elif", "level", ">=", "98", ":", "self", ".", ...
(Helper) Set light to specified level
[ "(", "Helper", ")", "Set", "light", "to", "specified", "level" ]
python
train
38.5
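A sketch, with `light` a hypothetical elkm1 Light element bound to a controller:

    light.level(50, time=10)  # ramp to 50% over 10 seconds
    light.level(0)            # 0 or below turns the light off (pf_encode)
    light.level(100)          # 98 or above turns it fully on (pn_encode)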
saltstack/salt
salt/modules/solr.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/solr.py#L1198-L1252
def full_import(handler, host=None, core_name=None, options=None, extra=None): ''' MASTER ONLY Submits an import command to the specified handler using specified options. This command can only be run if the minion is configured with solr.type=master handler : str The name of the data import handler. host : str (None) The solr host to query. __opts__['host'] is default. core : str (None) The core the handler belongs to. options : dict (__opts__) A list of options such as clean, optimize commit, verbose, and pause_replication. leave blank to use __opts__ defaults. options will be merged with __opts__ extra : dict ([]) Extra name value pairs to pass to the handler. e.g. ["name=value"] Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.full_import dataimport None music {'clean':True} ''' options = {} if options is None else options extra = [] if extra is None else extra if not _is_master(): err = ['solr.full_import can only be called on "master" minions'] return _get_return_dict(False, errors=err) if _get_none_or_value(core_name) is None and _check_for_cores(): err = ['No core specified when minion is configured as "multi-core".'] return _get_return_dict(False, err) resp = _pre_index_check(handler, host, core_name) if not resp['success']: return resp options = _merge_options(options) if options['clean']: resp = set_replication_enabled(False, host=host, core_name=core_name) if not resp['success']: errors = ['Failed to set the replication status on the master.'] return _get_return_dict(False, errors=errors) params = ['command=full-import'] for key, val in six.iteritems(options): params.append('&{0}={1}'.format(key, val)) url = _format_url(handler, host=host, core_name=core_name, extra=params + extra) return _http_request(url)
[ "def", "full_import", "(", "handler", ",", "host", "=", "None", ",", "core_name", "=", "None", ",", "options", "=", "None", ",", "extra", "=", "None", ")", ":", "options", "=", "{", "}", "if", "options", "is", "None", "else", "options", "extra", "=",...
MASTER ONLY Submits an import command to the specified handler using specified options. This command can only be run if the minion is configured with solr.type=master handler : str The name of the data import handler. host : str (None) The solr host to query. __opts__['host'] is default. core : str (None) The core the handler belongs to. options : dict (__opts__) A list of options such as clean, optimize commit, verbose, and pause_replication. leave blank to use __opts__ defaults. options will be merged with __opts__ extra : dict ([]) Extra name value pairs to pass to the handler. e.g. ["name=value"] Return : dict<str,obj>:: {'success':boolean, 'data':dict, 'errors':list, 'warnings':list} CLI Example: .. code-block:: bash salt '*' solr.full_import dataimport None music {'clean':True}
[ "MASTER", "ONLY", "Submits", "an", "import", "command", "to", "the", "specified", "handler", "using", "specified", "options", ".", "This", "command", "can", "only", "be", "run", "if", "the", "minion", "is", "configured", "with", "solr", ".", "type", "=", "...
python
train
37.745455
devricks/soft_drf
soft_drf/auth/utilities.py
https://github.com/devricks/soft_drf/blob/1869b13f9341bfcebd931059e93de2bc38570da3/soft_drf/auth/utilities.py#L107-L119
def create_token(user): """ Create token. """ payload = jwt_payload_handler(user) if api_settings.JWT_ALLOW_REFRESH: payload['orig_iat'] = timegm( datetime.utcnow().utctimetuple() ) # Return values token = jwt_encode_handler(payload) return token
[ "def", "create_token", "(", "user", ")", ":", "payload", "=", "jwt_payload_handler", "(", "user", ")", "if", "api_settings", ".", "JWT_ALLOW_REFRESH", ":", "payload", "[", "'orig_iat'", "]", "=", "timegm", "(", "datetime", ".", "utcnow", "(", ")", ".", "ut...
Create token.
[ "Create", "token", "." ]
python
train
22.692308
pseudonym117/Riot-Watcher
src/riotwatcher/Handlers/RateLimit/RateLimitHandler.py
https://github.com/pseudonym117/Riot-Watcher/blob/21ab12453a0d824d67e30f5514d02a5c5a411dea/src/riotwatcher/Handlers/RateLimit/RateLimitHandler.py#L54-L67
def after_request(self, region, endpoint_name, method_name, url, response): """ Called after a response is received and before it is returned to the user. :param string region: the region of this request :param string endpoint_name: the name of the endpoint that was requested :param string method_name: the name of the method that was requested :param url: The url that was requested :param response: the response received. This is a response from the Requests library """ for limiter in self._limiters: limiter.update_limiter(region, endpoint_name, method_name, response) return response
[ "def", "after_request", "(", "self", ",", "region", ",", "endpoint_name", ",", "method_name", ",", "url", ",", "response", ")", ":", "for", "limiter", "in", "self", ".", "_limiters", ":", "limiter", ".", "update_limiter", "(", "region", ",", "endpoint_name",...
Called after a response is received and before it is returned to the user. :param string region: the region of this request :param string endpoint_name: the name of the endpoint that was requested :param string method_name: the name of the method that was requested :param url: The url that was requested :param response: the response received. This is a response from the Requests library
[ "Called", "after", "a", "response", "is", "received", "and", "before", "it", "is", "returned", "to", "the", "user", "." ]
python
train
47.857143
jgorset/facebook
facebook/entity.py
https://github.com/jgorset/facebook/blob/90f035ae1828e4eeb7af428964fedf0ee99ec2ad/facebook/entity.py#L29-L34
def cache(self): """Query or return the Graph API representation of this resource.""" if not self._cache: self._cache = self.graph.get('%s' % self.id) return self._cache
[ "def", "cache", "(", "self", ")", ":", "if", "not", "self", ".", "_cache", ":", "self", ".", "_cache", "=", "self", ".", "graph", ".", "get", "(", "'%s'", "%", "self", ".", "id", ")", "return", "self", ".", "_cache" ]
Query or return the Graph API representation of this resource.
[ "Query", "or", "return", "the", "Graph", "API", "representation", "of", "this", "resource", "." ]
python
train
33.5
djaodjin/djaodjin-deployutils
deployutils/apps/flask/templates.py
https://github.com/djaodjin/djaodjin-deployutils/blob/a0fe3cf3030dbbf09025c69ce75a69b326565dd8/deployutils/apps/flask/templates.py#L31-L48
def site_prefixed(path):
    """
    *Mockup*: adds the path prefix when required.
    """
    if path is None:
        path = ''
    if settings.DEBUG and hasattr(settings, 'APP_NAME'):
        path_prefix = '/%s' % settings.APP_NAME
    else:
        path_prefix = ''
    if path:
        # We have an actual path instead of generating a prefix that will
        # be placed in front of static urls (ie. {{'pricing'|site_prefixed}}
        # instead of {{''|site_prefixed}}{{ASSET_URL}}).
        path_prefix += '/'
        if path.startswith('/'):
            path = path[1:]
    return urljoin(path_prefix, path)
[ "def", "site_prefixed", "(", "path", ")", ":", "if", "path", "is", "None", ":", "path", "=", "''", "if", "settings", ".", "DEBUG", "and", "hasattr", "(", "settings", ",", "'APP_NAME'", ")", ":", "path_prefix", "=", "'/%s'", "%", "settings", ".", "APP_N...
*Mockup*: adds the path prefix when required.
[ "*", "Mockup", "*", ":", "adds", "the", "path", "prefix", "when", "required", "." ]
python
train
33.222222
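A behavior sketch under assumed settings (DEBUG=True, APP_NAME='myapp'):

    site_prefixed('pricing')  # -> '/myapp/pricing'
    site_prefixed('')         # -> '/myapp', used as a prefix before static asset URLs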
pmacosta/pexdoc
pexdoc/pinspect.py
https://github.com/pmacosta/pexdoc/blob/201ac243e5781347feb75896a4231429fe6da4b1/pexdoc/pinspect.py#L531-L576
def load(self, callables_fname): r""" Load traced modules information from a `JSON <http://www.json.org/>`_ file. The loaded module information is merged with any existing module information :param callables_fname: File name :type callables_fname: :ref:`FileNameExists` :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \\`callables_fname\\` is not valid) """ # Validate file name _validate_fname(callables_fname) if not os.path.exists(callables_fname): raise OSError("File {0} could not be found".format(callables_fname)) with open(callables_fname, "r") as fobj: fdict = json.load(fobj) if sys.hexversion < 0x03000000: # pragma: no cover fdict = _unicode_to_ascii(fdict) self._callables_db.update(fdict["_callables_db"]) # Reverse the tuple-to-string conversion that the save method # does due to the fact that JSON keys need to be strings and the # keys of the reverse callable dictionary are tuples where the first # item is a file name and the second item is the starting line of the # callable within that file (dictionary value) rdict = {} for key, value in fdict["_reverse_callables_db"].items(): tokens = key[1:-1].split(",") key = tokens[0].strip()[1:-1] if platform.system().lower() == "windows": # pragma: no cover while True: tmp = key key = key.replace("\\\\", "\\") if tmp == key: break rdict[(key, int(tokens[1]))] = value self._reverse_callables_db.update(rdict) self._modules_dict.update(fdict["_modules_dict"]) self._fnames.update(fdict["_fnames"]) self._module_names.extend(fdict["_module_names"]) self._class_names.extend(fdict["_class_names"]) self._module_names = sorted(list(set(self._module_names))) self._class_names = sorted(list(set(self._class_names)))
[ "def", "load", "(", "self", ",", "callables_fname", ")", ":", "# Validate file name", "_validate_fname", "(", "callables_fname", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "callables_fname", ")", ":", "raise", "OSError", "(", "\"File {0} could not ...
r""" Load traced modules information from a `JSON <http://www.json.org/>`_ file. The loaded module information is merged with any existing module information :param callables_fname: File name :type callables_fname: :ref:`FileNameExists` :raises: * OSError (File *[fname]* could not be found) * RuntimeError (Argument \\`callables_fname\\` is not valid)
[ "r", "Load", "traced", "modules", "information", "from", "a", "JSON", "<http", ":", "//", "www", ".", "json", ".", "org", "/", ">", "_", "file", "." ]
python
train
45.434783
flowersteam/explauto
explauto/sensorimotor_model/inverse/inverse.py
https://github.com/flowersteam/explauto/blob/cf0f81ecb9f6412f7276a95bd27359000e1e26b6/explauto/sensorimotor_model/inverse/inverse.py#L81-L93
def _guess_x_kmeans(self, y_desired, **kwargs):
    """Provide initial guesses for a probable x from y"""
    k = kwargs.get('k', self.k)
    _, indexes = self.fmodel.dataset.nn_y(y_desired, k=k)
    X = np.array([self.fmodel.get_x(i) for i in indexes])
    if np.sum(X) == 0.:
        centroids = [self.fmodel.get_x(indexes[0])]
    else:
        try:
            centroids, _ = kmeans2(X, 2)
        except np.linalg.linalg.LinAlgError:
            centroids = [self.fmodel.get_x(indexes[0])]
    return centroids
[ "def", "_guess_x_kmeans", "(", "self", ",", "y_desired", ",", "*", "*", "kwargs", ")", ":", "k", "=", "kwargs", ".", "get", "(", "'k'", ",", "self", ".", "k", ")", "_", ",", "indexes", "=", "self", ".", "fmodel", ".", "dataset", ".", "nn_y", "(",...
Provide initial guesses for a probable x from y
[ "Provide", "initial", "guesses", "for", "a", "probable", "x", "from", "y" ]
python
train
42.615385
secdev/scapy
scapy/packet.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L1588-L1600
def split_bottom_up(lower, upper, __fval=None, **fval): """This call un-links an association that was made using bind_bottom_up. Have a look at help(bind_bottom_up) """ if __fval is not None: fval.update(__fval) def do_filter(params, cls): params_is_invalid = any( k not in params or params[k] != v for k, v in six.iteritems(fval) ) return cls != upper or params_is_invalid lower.payload_guess = [x for x in lower.payload_guess if do_filter(*x)]
[ "def", "split_bottom_up", "(", "lower", ",", "upper", ",", "__fval", "=", "None", ",", "*", "*", "fval", ")", ":", "if", "__fval", "is", "not", "None", ":", "fval", ".", "update", "(", "__fval", ")", "def", "do_filter", "(", "params", ",", "cls", "...
This call un-links an association that was made using bind_bottom_up. Have a look at help(bind_bottom_up)
[ "This", "call", "un", "-", "links", "an", "association", "that", "was", "made", "using", "bind_bottom_up", ".", "Have", "a", "look", "at", "help", "(", "bind_bottom_up", ")" ]
python
train
38.615385
openstack/networking-cisco
networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py
https://github.com/openstack/networking-cisco/blob/aa58a30aec25b86f9aa5952b0863045975debfa9/networking_cisco/plugins/cisco/cfg_agent/device_drivers/iosxe/iosxe_routing_driver.py#L384-L403
def _check_acl(self, acl_no, network, netmask):
    """Check that an ACL config exists in the running config.

    :param acl_no: access control list (ACL) number
    :param network: network which this ACL permits
    :param netmask: netmask of the network
    :return:
    """
    exp_cfg_lines = ['ip access-list standard ' + str(acl_no),
                     ' permit ' + str(network) + ' ' + str(netmask)]
    ios_cfg = self._get_running_config()
    parse = HTParser(ios_cfg)
    acls_raw = parse.find_children(exp_cfg_lines[0])
    if acls_raw:
        if exp_cfg_lines[1] in acls_raw:
            return True
        LOG.error("Mismatch in ACL configuration for %s", acl_no)
        return False
    LOG.debug("%s is not present in config", acl_no)
    return False
[ "def", "_check_acl", "(", "self", ",", "acl_no", ",", "network", ",", "netmask", ")", ":", "exp_cfg_lines", "=", "[", "'ip access-list standard '", "+", "str", "(", "acl_no", ")", ",", "' permit '", "+", "str", "(", "network", ")", "+", "' '", "+", "str"...
Check that an ACL config exists in the running config.

:param acl_no: access control list (ACL) number
:param network: network which this ACL permits
:param netmask: netmask of the network
:return:
[ "Check", "that", "an", "ACL", "config", "exists", "in", "the", "running", "config", "." ]
python
train
40.95
rosenbrockc/fortpy
fortpy/utility.py
https://github.com/rosenbrockc/fortpy/blob/1ed0757c52d549e41d9d44bdea68cb89529293a5/fortpy/utility.py#L17-L39
def symlink(source, target, isfile=True): """Creates a symlink at target *file* pointing to source. :arg isfile: when True, if symlinking is disabled in the global config, the file is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy is used and the target is considered a directory. """ from fortpy.code import config from os import path if config.symlink: from os import symlink, remove if path.isfile(target) or path.islink(target): remove(target) elif path.isdir(target): msg.warn("Cannot auto-delete directory '{}' for symlinking.".format(target)) return symlink(source, target) else: msg.info(" COPY: {}".format(source)) if isfile: copyfile(source, target) else: copy(source, target)
[ "def", "symlink", "(", "source", ",", "target", ",", "isfile", "=", "True", ")", ":", "from", "fortpy", ".", "code", "import", "config", "from", "os", "import", "path", "if", "config", ".", "symlink", ":", "from", "os", "import", "symlink", ",", "remov...
Creates a symlink at target *file* pointing to source. :arg isfile: when True, if symlinking is disabled in the global config, the file is copied instead with fortpy.utility.copyfile; otherwise fortpy.utility.copy is used and the target is considered a directory.
[ "Creates", "a", "symlink", "at", "target", "*", "file", "*", "pointing", "to", "source", "." ]
python
train
37
marcomusy/vtkplotter
vtkplotter/actors.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/actors.py#L870-L884
def color(self, c=False): """ Set/get actor's color. If None is passed as input, will use colors from active scalars. Same as `c()`. """ if c is False: return np.array(self.GetProperty().GetColor()) elif c is None: self.GetMapper().ScalarVisibilityOn() return self else: self.GetMapper().ScalarVisibilityOff() self.GetProperty().SetColor(colors.getColor(c)) return self
[ "def", "color", "(", "self", ",", "c", "=", "False", ")", ":", "if", "c", "is", "False", ":", "return", "np", ".", "array", "(", "self", ".", "GetProperty", "(", ")", ".", "GetColor", "(", ")", ")", "elif", "c", "is", "None", ":", "self", ".", ...
Set/get actor's color. If None is passed as input, will use colors from active scalars. Same as `c()`.
[ "Set", "/", "get", "actor", "s", "color", ".", "If", "None", "is", "passed", "as", "input", "will", "use", "colors", "from", "active", "scalars", ".", "Same", "as", "c", "()", "." ]
python
train
32.733333
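A sketch, with `actor` a hypothetical vtkplotter Actor:

    actor.color('red')   # set a named color (scalar coloring is switched off)
    actor.color(None)    # color by active scalars instead
    rgb = actor.color()  # query: current RGB as a numpy array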
Galarzaa90/tibia.py
tibiapy/utils.py
https://github.com/Galarzaa90/tibia.py/blob/02ba1a8f1e18177ef5c7dcd44affc8d761d59e12/tibiapy/utils.py#L242-L262
def parse_tibiacom_content(content, *, html_class="BoxContent", tag="div", builder="lxml"):
    """Parses HTML content from Tibia.com into a BeautifulSoup object.

    Parameters
    ----------
    content: :class:`str`
        The raw HTML content from Tibia.com
    html_class: :class:`str`
        The HTML class of the parsed element. The default value is ``BoxContent``.
    tag: :class:`str`
        The HTML tag to select. The default value is ``div``.
    builder: :class:`str`
        The builder to use. The default value is ``lxml``.

    Returns
    -------
    :class:`bs4.BeautifulSoup`, optional
        The parsed content.
    """
    return bs4.BeautifulSoup(content.replace('ISO-8859-1', 'utf-8'), builder,
                             parse_only=bs4.SoupStrainer(tag, class_=html_class))
[ "def", "parse_tibiacom_content", "(", "content", ",", "*", ",", "html_class", "=", "\"BoxContent\"", ",", "tag", "=", "\"div\"", ",", "builder", "=", "\"lxml\"", ")", ":", "return", "bs4", ".", "BeautifulSoup", "(", "content", ".", "replace", "(", "'ISO-8859...
Parses HTML content from Tibia.com into a BeautifulSoup object.

Parameters
----------
content: :class:`str`
    The raw HTML content from Tibia.com
html_class: :class:`str`
    The HTML class of the parsed element. The default value is ``BoxContent``.
tag: :class:`str`
    The HTML tag to select. The default value is ``div``.
builder: :class:`str`
    The builder to use. The default value is ``lxml``.

Returns
-------
:class:`bs4.BeautifulSoup`, optional
    The parsed content.
[ "Parses", "HTML", "content", "from", "Tibia", ".", "com", "into", "a", "BeautifulSoup", "object", "." ]
python
train
37.285714
Crunch-io/crunch-cube
src/cr/cube/dimension.py
https://github.com/Crunch-io/crunch-cube/blob/a837840755690eb14b2ec8e8d93b4104e01c854f/src/cr/cube/dimension.py#L322-L357
def labels( self, include_missing=False, include_transforms=False, include_cat_ids=False ): """Return list of str labels for the elements of this dimension. Returns a list of (label, element_id) pairs if *include_cat_ids* is True. The `element_id` value in the second position of the pair is None for subtotal items (which don't have an element-id). """ # TODO: Having an alternate return type triggered by a flag-parameter # (`include_cat_ids` in this case) is poor practice. Using flags like # that effectively squashes what should be two methods into one. # Either get rid of the need for that alternate return value type or # create a separate method for it. elements = self.all_elements if include_missing else self.valid_elements include_subtotals = include_transforms and self.dimension_type != DT.CA_SUBVAR # ---items are elements or subtotals, interleaved in display order--- interleaved_items = tuple(self._iter_interleaved_items(elements)) labels = list( item.label for item in interleaved_items if include_subtotals or not item.is_insertion ) if include_cat_ids: element_ids = tuple( None if item.is_insertion else item.element_id for item in interleaved_items if include_subtotals or not item.is_insertion ) return list(zip(labels, element_ids)) return labels
[ "def", "labels", "(", "self", ",", "include_missing", "=", "False", ",", "include_transforms", "=", "False", ",", "include_cat_ids", "=", "False", ")", ":", "# TODO: Having an alternate return type triggered by a flag-parameter", "# (`include_cat_ids` in this case) is poor prac...
Return list of str labels for the elements of this dimension. Returns a list of (label, element_id) pairs if *include_cat_ids* is True. The `element_id` value in the second position of the pair is None for subtotal items (which don't have an element-id).
[ "Return", "list", "of", "str", "labels", "for", "the", "elements", "of", "this", "dimension", "." ]
python
train
42.194444
HazyResearch/pdftotree
pdftotree/utils/pdf/layout_utils.py
https://github.com/HazyResearch/pdftotree/blob/5890d668b475d5d3058d1d886aafbfd83268c440/pdftotree/utils/pdf/layout_utils.py#L17-L25
def traverse_layout(root, callback):
    """
    Tree walker that invokes the callback as it traverses the pdf object tree
    """
    callback(root)
    if isinstance(root, collections.Iterable):
        for child in root:
            traverse_layout(child, callback)
[ "def", "traverse_layout", "(", "root", ",", "callback", ")", ":", "callback", "(", "root", ")", "if", "isinstance", "(", "root", ",", "collections", ".", "Iterable", ")", ":", "for", "child", "in", "root", ":", "traverse_layout", "(", "child", ",", "call...
Tree walker that invokes the callback as it traverses the pdf object tree
[ "Tree", "walker", "and", "invokes", "the", "callback", "as", "it", "traverse", "pdf", "object", "tree" ]
python
train
28.666667
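A short sketch of driving the walker above with a collecting callback; page_layout stands in for a pdfminer LTPage produced elsewhere:

texts = []

def collect_text(obj):
    # Keep any layout object that exposes extractable text.
    if hasattr(obj, "get_text"):
        texts.append(obj.get_text())

traverse_layout(page_layout, collect_text)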
fhcrc/seqmagick
seqmagick/transform.py
https://github.com/fhcrc/seqmagick/blob/1642bb87ba5c171fbd307f9da0f8a0ee1d69d5ed/seqmagick/transform.py#L134-L143
def include_from_file(records, handle): """ Filter the records, keeping only sequences whose ID is contained in the handle. """ ids = set(i.strip() for i in handle) for record in records: if record.id.strip() in ids: yield record
[ "def", "include_from_file", "(", "records", ",", "handle", ")", ":", "ids", "=", "set", "(", "i", ".", "strip", "(", ")", "for", "i", "in", "handle", ")", "for", "record", "in", "records", ":", "if", "record", ".", "id", ".", "strip", "(", ")", "...
Filter the records, keeping only sequences whose ID is contained in the handle.
[ "Filter", "the", "records", "keeping", "only", "sequences", "whose", "ID", "is", "contained", "in", "the", "handle", "." ]
python
train
26.5
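A self-contained sketch of the generator above; the Rec class is a stand-in for the Biopython SeqRecord objects real callers would pass:

from io import StringIO

class Rec:
    def __init__(self, id):
        self.id = id

handle = StringIO("seq1\nseq3\n")
records = [Rec("seq1"), Rec("seq2"), Rec("seq3")]
print([r.id for r in include_from_file(records, handle)])  # ['seq1', 'seq3']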
moluwole/Bast
bast/controller.py
https://github.com/moluwole/Bast/blob/eecf55ae72e6f24af7c101549be0422cd2c1c95a/bast/controller.py#L37-L51
def write_error(self, status_code, **kwargs):
    """
    Handle exceptions from the server and format them as readable HTML
    """
    reason = self._reason
    if self.settings.get("serve_traceback") and "exc_info" in kwargs:
        error = []
        for line in traceback.format_exception(*kwargs["exc_info"]):
            error.append(line)
    else:
        error = None
    data = {'_traceback': error, 'message': reason, 'code': status_code}
    content = self.render_exception(**data)
    self.write(content)
[ "def", "write_error", "(", "self", ",", "status_code", ",", "*", "*", "kwargs", ")", ":", "reason", "=", "self", ".", "_reason", "if", "self", ".", "settings", ".", "get", "(", "\"serve_traceback\"", ")", "and", "\"exc_info\"", "in", "kwargs", ":", "erro...
Handle exceptions from the server and format them as readable HTML
[ "Handle", "Exceptions", "from", "the", "server", ".", "Formats", "the", "HTML", "into", "readable", "form" ]
python
train
37.466667
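A hypothetical subclass showing when Tornado invokes the override above; FailingController and the raised error are illustrative only:

class FailingController(Controller):
    def get(self):
        # Any uncaught exception makes Tornado call write_error(500, exc_info=...).
        raise ValueError("boom")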
dingusdk/PythonIhcSdk
ihcsdk/ihccontroller.py
https://github.com/dingusdk/PythonIhcSdk/blob/7e2067e009fe7600b49f30bff1cf91dc72fc891e/ihcsdk/ihccontroller.py#L75-L85
def get_project(self) -> str:
    """Get the IHC project, making sure the controller is ready first"""
    with IHCController._mutex:
        if self._project is None:
            if self.client.get_state() != IHCSTATE_READY:
                ready = self.client.wait_for_state_change(IHCSTATE_READY, 10)
                if ready != IHCSTATE_READY:
                    return None
            self._project = self.client.get_project()
        return self._project
[ "def", "get_project", "(", "self", ")", "->", "str", ":", "with", "IHCController", ".", "_mutex", ":", "if", "self", ".", "_project", "is", "None", ":", "if", "self", ".", "client", ".", "get_state", "(", ")", "!=", "IHCSTATE_READY", ":", "ready", "=",...
Get the IHC project, making sure the controller is ready first
[ "Get", "the", "ihc", "project", "and", "make", "sure", "controller", "is", "ready", "before" ]
python
train
49.545455
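A usage sketch assuming `controller` is an already-constructed IHCController; construction and authentication are omitted:

project_xml = controller.get_project()
if project_xml is None:
    # The controller did not reach IHCSTATE_READY within the 10-second wait.
    print("IHC controller not ready, no project loaded")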
facelessuser/soupsieve
soupsieve/css_match.py
https://github.com/facelessuser/soupsieve/blob/24859cc3e756ebf46b75547d49c6b4a7bf35ee82/soupsieve/css_match.py#L236-L242
def get_parent(self, el, no_iframe=False): """Get parent.""" parent = el.parent if no_iframe and parent is not None and self.is_iframe(parent): parent = None return parent
[ "def", "get_parent", "(", "self", ",", "el", ",", "no_iframe", "=", "False", ")", ":", "parent", "=", "el", ".", "parent", "if", "no_iframe", "and", "parent", "is", "not", "None", "and", "self", ".", "is_iframe", "(", "parent", ")", ":", "parent", "=...
Get parent.
[ "Get", "parent", "." ]
python
train
30
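An illustrative call on the helper above; matcher and el are hypothetical stand-ins for the containing match object and a parsed element:

parent = matcher.get_parent(el, no_iframe=True)
if parent is None:
    # Either el has no parent, or the parent is an iframe boundary.
    pass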