repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
DLR-RM/RAFCON
source/rafcon/gui/mygaphas/utils/gap_draw_helper.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/mygaphas/utils/gap_draw_helper.py#L416-L445
def draw_label_path(context, width, height, arrow_height, distance_to_port, port_offset):
    """Draws the path for an upright label, starting from the port position.

    The docstring previously documented a ``draw_connection_to_port`` parameter
    that does not exist in the signature and omitted ``arrow_height``; both are
    corrected here. The drawing logic is unchanged.

    :param context: The Cairo context
    :param float width: Width of the label
    :param float height: Height of the label
    :param float arrow_height: Height of the arrow tip pointing at the port
    :param float distance_to_port: Distance to the port related to the label
    :param float port_offset: Distance from the port center to its border
    """
    c = context
    # The current point is the port position
    # Move to outer border of state
    c.rel_move_to(0, port_offset)
    # Draw line to arrow tip of label
    c.rel_line_to(0, distance_to_port)
    # Line to upper left corner
    c.rel_line_to(-width / 2., arrow_height)
    # Line to lower left corner
    c.rel_line_to(0, height - arrow_height)
    # Line to lower right corner
    c.rel_line_to(width, 0)
    # Line to upper right corner
    c.rel_line_to(0, -(height - arrow_height))
    # Line to center top (tip of label)
    c.rel_line_to(-width / 2., -arrow_height)
    # Close path
    c.close_path()
[ "def", "draw_label_path", "(", "context", ",", "width", ",", "height", ",", "arrow_height", ",", "distance_to_port", ",", "port_offset", ")", ":", "c", "=", "context", "# The current point is the port position", "# Mover to outer border of state", "c", ".", "rel_move_to...
Draws the path for an upright label :param context: The Cairo context :param float width: Width of the label :param float height: Height of the label :param float distance_to_port: Distance to the port related to the label :param float port_offset: Distance from the port center to its border :param bool draw_connection_to_port: Whether to draw a line from the tip of the label to the port
[ "Draws", "the", "path", "for", "an", "upright", "label" ]
python
train
spyder-ide/spyder
spyder/plugins/projects/projecttypes/python.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/projects/projecttypes/python.py#L51-L60
def remove_from_pythonpath(self, path):
    """Remove path from project's PYTHONPATH.

    :param path: path entry to remove
    :return: True if path was removed, False if it was not found
    """
    pathlist = self.get_pythonpath()
    if path not in pathlist:
        return False
    # list.remove drops the first occurrence -- same effect as the old
    # pop(index(path)) but idiomatic and a single scan.
    pathlist.remove(path)
    self.set_pythonpath(pathlist)
    return True
[ "def", "remove_from_pythonpath", "(", "self", ",", "path", ")", ":", "pathlist", "=", "self", ".", "get_pythonpath", "(", ")", "if", "path", "in", "pathlist", ":", "pathlist", ".", "pop", "(", "pathlist", ".", "index", "(", "path", ")", ")", "self", "....
Remove path from project's PYTHONPATH Return True if path was removed, False if it was not found
[ "Remove", "path", "from", "project", "s", "PYTHONPATH", "Return", "True", "if", "path", "was", "removed", "False", "if", "it", "was", "not", "found" ]
python
train
OLC-LOC-Bioinformatics/GenomeQAML
genomeqaml/extract_features.py
https://github.com/OLC-LOC-Bioinformatics/GenomeQAML/blob/2953e574c185afab23075641da4ce5392bc003e9/genomeqaml/extract_features.py#L313-L334
def find_l50(contig_lengths_dict, genome_length_dict):
    """
    Calculate the L50 for each strain. L50 is defined as the number of contigs
    required to achieve the N50.

    :param contig_lengths_dict: dictionary of strain name: reverse-sorted list
        of all contig lengths
    :param genome_length_dict: dictionary of strain name: total genome length
    :return: l50_dict: dictionary of strain name: L50
    """
    l50_dict = dict()
    for strain, lengths in contig_lengths_dict.items():
        half_genome = genome_length_dict[strain] * 0.5
        running_total = 0
        # Walk the reverse-sorted contig lengths until at least half of the
        # genome is covered; the 1-based position of that contig is the L50.
        for contig_count, contig_length in enumerate(lengths, start=1):
            running_total += contig_length
            if running_total >= half_genome:
                l50_dict[strain] = contig_count
                break
    return l50_dict
[ "def", "find_l50", "(", "contig_lengths_dict", ",", "genome_length_dict", ")", ":", "# Initialise the dictionary", "l50_dict", "=", "dict", "(", ")", "for", "file_name", ",", "contig_lengths", "in", "contig_lengths_dict", ".", "items", "(", ")", ":", "currentlength"...
Calculate the L50 for each strain. L50 is defined as the number of contigs required to achieve the N50 :param contig_lengths_dict: dictionary of strain name: reverse-sorted list of all contig lengths :param genome_length_dict: dictionary of strain name: total genome length :return: l50_dict: dictionary of strain name: L50
[ "Calculate", "the", "L50", "for", "each", "strain", ".", "L50", "is", "defined", "as", "the", "number", "of", "contigs", "required", "to", "achieve", "the", "N50", ":", "param", "contig_lengths_dict", ":", "dictionary", "of", "strain", "name", ":", "reverse"...
python
train
pixelogik/NearPy
nearpy/storage/storage_mongo.py
https://github.com/pixelogik/NearPy/blob/1b534b864d320d875508e95cd2b76b6d8c07a90b/nearpy/storage/storage_mongo.py#L182-L190
def load_hash_configuration(self, hash_name):
    """ Loads and returns hash configuration """
    # Configurations are stored under the key "<hash_name>_conf".
    document = self.mongo_object.find_one(
        {'hash_conf_name': hash_name + '_conf'}
    )
    if document is None:
        return None
    return pickle.loads(document['hash_configuration'])
[ "def", "load_hash_configuration", "(", "self", ",", "hash_name", ")", ":", "conf", "=", "self", ".", "mongo_object", ".", "find_one", "(", "{", "'hash_conf_name'", ":", "hash_name", "+", "'_conf'", "}", ")", "return", "pickle", ".", "loads", "(", "conf", "...
Loads and returns hash configuration
[ "Loads", "and", "returns", "hash", "configuration" ]
python
train
igorcoding/asynctnt-queue
asynctnt_queue/queue.py
https://github.com/igorcoding/asynctnt-queue/blob/75719b2dd27e8314ae924aea6a7a85be8f48ecc5/asynctnt_queue/queue.py#L58-L72
def tube(self, name):
    """
    Returns tube by its name

    :param name: Tube name
    :returns: ``self.tube_cls`` instance (by default
        :class:`asynctnt_queue.Tube`)
    """
    # Serve from the cache when the tube was requested before.
    try:
        return self._tubes[name]
    except KeyError:
        pass

    assert name, 'Tube name must be specified'
    tube_instance = self._tube_cls(self, name)
    self._tubes[name] = tube_instance
    return tube_instance
[ "def", "tube", "(", "self", ",", "name", ")", ":", "if", "name", "in", "self", ".", "_tubes", ":", "return", "self", ".", "_tubes", "[", "name", "]", "assert", "name", ",", "'Tube name must be specified'", "t", "=", "self", ".", "_tube_cls", "(", "self...
Returns tube by its name :param name: Tube name :returns: ``self.tube_cls`` instance (by default :class:`asynctnt_queue.Tube`)
[ "Returns", "tube", "by", "its", "name" ]
python
train
mlperf/training
object_detection/pytorch/maskrcnn_benchmark/modeling/rpn/inference.py
https://github.com/mlperf/training/blob/1c6ae725a81d15437a2b2df05cac0673fde5c3a4/object_detection/pytorch/maskrcnn_benchmark/modeling/rpn/inference.py#L123-L150
def forward(self, anchors, objectness, box_regression, targets=None):
    """
    Arguments:
        anchors: list[list[BoxList]]
        objectness: list[tensor]
        box_regression: list[tensor]

    Returns:
        boxlists (list[BoxList]): the post-processed anchors, after
            applying box decoding and NMS
    """
    num_levels = len(objectness)
    # Regroup anchors from per-image to per-level before processing
    # each feature map independently.
    per_level_anchors = list(zip(*anchors))
    sampled_boxes = [
        self.forward_for_single_feature_map(a, o, b)
        for a, o, b in zip(per_level_anchors, objectness, box_regression)
    ]
    # Transpose back to per-image grouping and concatenate the per-level
    # results for each image.
    boxlists = [cat_boxlist(per_image) for per_image in zip(*sampled_boxes)]
    if num_levels > 1:
        boxlists = self.select_over_all_levels(boxlists)
    # append ground-truth bboxes to proposals
    if self.training and targets is not None:
        boxlists = self.add_gt_proposals(boxlists, targets)
    return boxlists
[ "def", "forward", "(", "self", ",", "anchors", ",", "objectness", ",", "box_regression", ",", "targets", "=", "None", ")", ":", "sampled_boxes", "=", "[", "]", "num_levels", "=", "len", "(", "objectness", ")", "anchors", "=", "list", "(", "zip", "(", "...
Arguments: anchors: list[list[BoxList]] objectness: list[tensor] box_regression: list[tensor] Returns: boxlists (list[BoxList]): the post-processed anchors, after applying box decoding and NMS
[ "Arguments", ":", "anchors", ":", "list", "[", "list", "[", "BoxList", "]]", "objectness", ":", "list", "[", "tensor", "]", "box_regression", ":", "list", "[", "tensor", "]" ]
python
train
VisTrails/tej
tej/submission.py
https://github.com/VisTrails/tej/blob/b8dedaeb6bdeb650b46cfe6d85e5aa9284fc7f0b/tej/submission.py#L52-L62
def escape_queue(s):
    """Escapes the path to a queue, e.g. preserves ~ at the beginning.
    """
    # Normalize the input to a text string first.
    if isinstance(s, PosixPath):
        s = unicode_(s)
    elif isinstance(s, bytes):
        s = s.decode('utf-8')

    # A leading "~/" must stay outside the escaped part so the shell can
    # still expand it to the home directory.
    if s.startswith('~/'):
        return '~/' + shell_escape(s[2:])
    return shell_escape(s)
[ "def", "escape_queue", "(", "s", ")", ":", "if", "isinstance", "(", "s", ",", "PosixPath", ")", ":", "s", "=", "unicode_", "(", "s", ")", "elif", "isinstance", "(", "s", ",", "bytes", ")", ":", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", ...
Escapes the path to a queue, e.g. preserves ~ at the begining.
[ "Escapes", "the", "path", "to", "a", "queue", "e", ".", "g", ".", "preserves", "~", "at", "the", "begining", "." ]
python
train
xflr6/graphviz
graphviz/backend.py
https://github.com/xflr6/graphviz/blob/7376095ef1e47abad7e0b0361b6c9720b706e7a0/graphviz/backend.py#L164-L184
def render(engine, format, filepath, renderer=None, formatter=None, quiet=False):
    """Render file with Graphviz ``engine`` into ``format``, return result filename.

    Args:
        engine: The layout command used for rendering (``'dot'``, ``'neato'``, ...).
        format: The output format used for rendering (``'pdf'``, ``'png'``, ...).
        filepath: Path to the DOT source file to render.
        renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...).
        formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...).
        quiet (bool): Suppress ``stderr`` output.

    Returns:
        The (possibly relative) path of the rendered file.

    Raises:
        ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known.
        graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None.
        graphviz.ExecutableNotFound: If the Graphviz executable is not found.
        subprocess.CalledProcessError: If the exit status is non-zero.
    """
    # command() validates the arguments and yields both the argv list and
    # the filename Graphviz will write; run() raises on a non-zero exit.
    cmd, rendered = command(engine, format, filepath, renderer, formatter)
    run(cmd, capture_output=True, check=True, quiet=quiet)
    return rendered
[ "def", "render", "(", "engine", ",", "format", ",", "filepath", ",", "renderer", "=", "None", ",", "formatter", "=", "None", ",", "quiet", "=", "False", ")", ":", "cmd", ",", "rendered", "=", "command", "(", "engine", ",", "format", ",", "filepath", ...
Render file with Graphviz ``engine`` into ``format``, return result filename. Args: engine: The layout commmand used for rendering (``'dot'``, ``'neato'``, ...). format: The output format used for rendering (``'pdf'``, ``'png'``, ...). filepath: Path to the DOT source file to render. renderer: The output renderer used for rendering (``'cairo'``, ``'gd'``, ...). formatter: The output formatter used for rendering (``'cairo'``, ``'gd'``, ...). quiet (bool): Suppress ``stderr`` output. Returns: The (possibly relative) path of the rendered file. Raises: ValueError: If ``engine``, ``format``, ``renderer``, or ``formatter`` are not known. graphviz.RequiredArgumentError: If ``formatter`` is given but ``renderer`` is None. graphviz.ExecutableNotFound: If the Graphviz executable is not found. subprocess.CalledProcessError: If the exit status is non-zero.
[ "Render", "file", "with", "Graphviz", "engine", "into", "format", "return", "result", "filename", "." ]
python
train
sony/nnabla
python/src/nnabla/experimental/graph_converters/fixed_point_weight.py
https://github.com/sony/nnabla/blob/aaf3d33b7cbb38f2a03aa754178ba8f7c8481320/python/src/nnabla/experimental/graph_converters/fixed_point_weight.py#L44-L69
def convert(self, vroot, entry_variables):
    """
    All functions are replaced with the same `new` function.

    Args:
        vroot (:obj:`Variable`): NNabla Variable
        entry_variables (:obj:`Variable`): Entry variable from which the
            conversion starts.
    """
    self.graph_info = GraphInfo(vroot)
    self.entry_variables = entry_variables

    with nn.parameter_scope(self.name):
        # Function loop in the forward order
        for t, func in enumerate(self.graph_info.funcs):
            if func.name in self.inner_prod_functions:
                # Inner-product-like layers get the fixed-point weight
                # treatment; everything else passes through unchanged.
                inner_prod_func = func
                o = self._fixed_point_weight_conversion(inner_prod_func)
                continue
            # Identity conversion
            o = self._identity_conversion(func)

    # NOTE(review): `o` is the output of the last converted function; if the
    # graph had no functions this would raise NameError -- presumably vroot
    # always has at least one function. TODO confirm.
    self.end_variable = o
    if self.call_forward:
        o.forward(clear_buffer=True)
    return self.end_variable
[ "def", "convert", "(", "self", ",", "vroot", ",", "entry_variables", ")", ":", "self", ".", "graph_info", "=", "GraphInfo", "(", "vroot", ")", "self", ".", "entry_variables", "=", "entry_variables", "with", "nn", ".", "parameter_scope", "(", "self", ".", "...
All functions are replaced with the same `new` function. Args: vroot (:obj:`Variable`): NNabla Variable entry_variables (:obj:`Variable`): Entry variable from which the conversion starts.
[ "All", "functions", "are", "replaced", "with", "the", "same", "new", "function", "." ]
python
train
ScottDuckworth/python-anyvcs
anyvcs/svn.py
https://github.com/ScottDuckworth/python-anyvcs/blob/9eb09defbc6b7c99d373fad53cbf8fc81b637923/anyvcs/svn.py#L778-L811
def load(
    self, stream, progress=None, ignore_uuid=False, force_uuid=False,
    use_pre_commit_hook=False, use_post_commit_hook=False, parent_dir=None
):
    """Load a dumpfile stream into the repository.

    :param stream: A file stream from which the dumpfile is read
    :param progress: A file stream to which progress is written

    See ``svnadmin help load`` for details on the other arguments.

    :raises subprocess.CalledProcessError: if ``svnadmin`` exits non-zero;
        the captured stderr is attached to the exception.
    """
    cmd = [SVNADMIN, 'load', '.']
    # Without a progress stream, silence svnadmin's per-revision output.
    if progress is None:
        cmd.append('-q')
    if ignore_uuid:
        cmd.append('--ignore-uuid')
    if force_uuid:
        cmd.append('--force-uuid')
    if use_pre_commit_hook:
        cmd.append('--use-pre-commit-hook')
    if use_post_commit_hook:
        cmd.append('--use-post-commit-hook')
    if parent_dir:
        cmd.extend(['--parent-dir', parent_dir])
    # The dumpfile is fed on stdin; progress (if any) goes to the caller's
    # stream, and stderr is captured so it can be reported on failure.
    p = subprocess.Popen(
        cmd, cwd=self.path, stdin=stream, stdout=progress,
        stderr=subprocess.PIPE
    )
    stderr = p.stderr.read()
    p.stderr.close()
    p.wait()
    if p.returncode != 0:
        raise subprocess.CalledProcessError(p.returncode, cmd, stderr)
[ "def", "load", "(", "self", ",", "stream", ",", "progress", "=", "None", ",", "ignore_uuid", "=", "False", ",", "force_uuid", "=", "False", ",", "use_pre_commit_hook", "=", "False", ",", "use_post_commit_hook", "=", "False", ",", "parent_dir", "=", "None", ...
Load a dumpfile stream into the repository. :param stream: A file stream from which the dumpfile is read :param progress: A file stream to which progress is written See ``svnadmin help load`` for details on the other arguments.
[ "Load", "a", "dumpfile", "stream", "into", "the", "repository", "." ]
python
train
DLR-RM/RAFCON
source/rafcon/core/states/state.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/core/states/state.py#L407-L420
def remove_input_data_port(self, data_port_id, force=False, destroy=True):
    """Remove an input data port from the state

    :param int data_port_id: the id of the input data port to remove
    :param bool force: if the removal should be forced without checking constraints
    :param bool destroy: if True, also remove all data flows connected to this port
    :raises exceptions.AttributeError: if the specified input data port does not exist
    :return: the removed input data port
    """
    if data_port_id in self._input_data_ports:
        if destroy:
            self.remove_data_flows_with_data_port_id(data_port_id)
        self._input_data_ports[data_port_id].parent = None
        return self._input_data_ports.pop(data_port_id)
    else:
        # The old code passed data_port_id as a second argument to
        # AttributeError (logging-style), so the %s placeholder was never
        # filled in; interpolate it properly and fix the "exit" typo.
        raise AttributeError("input data port with id %s does not exist" % data_port_id)
[ "def", "remove_input_data_port", "(", "self", ",", "data_port_id", ",", "force", "=", "False", ",", "destroy", "=", "True", ")", ":", "if", "data_port_id", "in", "self", ".", "_input_data_ports", ":", "if", "destroy", ":", "self", ".", "remove_data_flows_with_...
Remove an input data port from the state :param int data_port_id: the id or the output data port to remove :param bool force: if the removal should be forced without checking constraints :raises exceptions.AttributeError: if the specified input data port does not exist
[ "Remove", "an", "input", "data", "port", "from", "the", "state" ]
python
train
cggh/scikit-allel
allel/io/vcf_read.py
https://github.com/cggh/scikit-allel/blob/3c979a57a100240ba959dd13f98839349530f215/allel/io/vcf_read.py#L74-L100
def _chunk_iter_progress(it, log, prefix):
    """Wrap a chunk iterator for progress logging.

    Yields the ``(chunk, chunk_length, chrom, pos)`` tuples from ``it``
    unchanged, printing a throughput line to ``log`` per chunk and a
    summary line when the iterator is exhausted. ``chrom`` is re-decoded
    from bytes to text before being yielded.
    """
    n_variants = 0
    before_all = time.time()
    before_chunk = before_all
    for chunk, chunk_length, chrom, pos in it:
        after_chunk = time.time()
        elapsed_chunk = after_chunk - before_chunk
        elapsed = after_chunk - before_all
        n_variants += chunk_length
        # chrom arrives as bytes; decode for display and for the caller.
        chrom = text_type(chrom, 'utf8')
        message = (
            '%s %s rows in %.2fs; chunk in %.2fs (%s rows/s)' %
            (prefix, n_variants, elapsed, elapsed_chunk,
             int(chunk_length // elapsed_chunk))
        )
        if chrom:
            message += '; %s:%s' % (chrom, pos)
        print(message, file=log)
        log.flush()
        yield chunk, chunk_length, chrom, pos
        # Time spent by the consumer between chunks counts toward the next
        # chunk's elapsed time, so reset the reference point here.
        before_chunk = after_chunk
    after_all = time.time()
    elapsed = after_all - before_all
    print('%s all done (%s rows/s)' %
          (prefix, int(n_variants // elapsed)), file=log)
    log.flush()
[ "def", "_chunk_iter_progress", "(", "it", ",", "log", ",", "prefix", ")", ":", "n_variants", "=", "0", "before_all", "=", "time", ".", "time", "(", ")", "before_chunk", "=", "before_all", "for", "chunk", ",", "chunk_length", ",", "chrom", ",", "pos", "in...
Wrap a chunk iterator for progress logging.
[ "Wrap", "a", "chunk", "iterator", "for", "progress", "logging", "." ]
python
train
materialsproject/pymatgen
pymatgen/apps/borg/queen.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/apps/borg/queen.py#L64-L83
def parallel_assimilate(self, rootpath):
    """
    Assimilate the entire subdirectory structure in rootpath.

    Scans the tree for paths the drone can handle, then processes them in
    parallel across ``self._num_drones`` worker processes, collecting the
    decoded results into ``self._data``.
    """
    logger.info('Scanning for valid paths...')
    valid_paths = []
    for (parent, subdirs, files) in os.walk(rootpath):
        valid_paths.extend(self._drone.get_valid_paths((parent, subdirs,
                                                        files)))
    # Manager-backed list/dict are shared with the worker processes so they
    # can append results and report progress.
    manager = Manager()
    data = manager.list()
    status = manager.dict()
    status['count'] = 0
    status['total'] = len(valid_paths)
    logger.info('{} valid paths found.'.format(len(valid_paths)))
    p = Pool(self._num_drones)
    p.map(order_assimilation, ((path, self._drone, data, status)
                               for path in valid_paths))
    # Workers hand back JSON strings; decode them into real objects here in
    # the parent process.
    for d in data:
        self._data.append(json.loads(d, cls=MontyDecoder))
[ "def", "parallel_assimilate", "(", "self", ",", "rootpath", ")", ":", "logger", ".", "info", "(", "'Scanning for valid paths...'", ")", "valid_paths", "=", "[", "]", "for", "(", "parent", ",", "subdirs", ",", "files", ")", "in", "os", ".", "walk", "(", "...
Assimilate the entire subdirectory structure in rootpath.
[ "Assimilate", "the", "entire", "subdirectory", "structure", "in", "rootpath", "." ]
python
train
deployed/django-emailtemplates
emailtemplates/email.py
https://github.com/deployed/django-emailtemplates/blob/0e95139989dbcf7e624153ddcd7b5b66b48eb6eb/emailtemplates/email.py#L150-L164
def send(self, to, attachment_paths=None, *args, **kwargs):
    """Run all the operations on the eft object needed to send an email.

    Equivalent to calling, in order:

        eft = EmailFromTemplate(name='sth/sth.html')
        eft.get_object()
        eft.render_message()
        eft.send_email(['email@example.com'])
        return eft.sent
    """
    self.get_object()
    self.render_message()
    self.send_email(to, attachment_paths, *args, **kwargs)
    was_sent = self.sent
    if was_sent:
        logger.info(u"Mail has been sent to: %s ", to)
    return was_sent
[ "def", "send", "(", "self", ",", "to", ",", "attachment_paths", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "get_object", "(", ")", "self", ".", "render_message", "(", ")", "self", ".", "send_email", "(", "to", ",...
This function does all the operations on eft object, that are necessary to send email. Usually one would use eft object like this: eft = EmailFromTemplate(name='sth/sth.html') eft.get_object() eft.render_message() eft.send_email(['email@example.com']) return eft.sent
[ "This", "function", "does", "all", "the", "operations", "on", "eft", "object", "that", "are", "necessary", "to", "send", "email", ".", "Usually", "one", "would", "use", "eft", "object", "like", "this", ":", "eft", "=", "EmailFromTemplate", "(", "name", "="...
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/core/completer.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/core/completer.py#L626-L640
def alias_matches(self, text):
    """Match internal system aliases"""
    # Alias completion only makes sense for the first word on the line --
    # unless the line starts with the 'sudo' command.
    stripped = self.text_until_cursor.lstrip()
    if ' ' in stripped and not stripped.startswith('sudo'):
        return []

    text = os.path.expanduser(text)
    aliases = self.alias_table.keys()
    if text == '':
        return aliases
    return [alias for alias in aliases if alias.startswith(text)]
[ "def", "alias_matches", "(", "self", ",", "text", ")", ":", "#print 'Completer->alias_matches:',text,'lb',self.text_until_cursor # dbg", "# if we are not in the first 'item', alias matching", "# doesn't make sense - unless we are starting with 'sudo' command.", "main_text", "=", "self", ...
Match internal system aliases
[ "Match", "internal", "system", "aliases" ]
python
test
tornadoweb/tornado
tornado/iostream.py
https://github.com/tornadoweb/tornado/blob/b8b481770bcdb333a69afde5cce7eaa449128326/tornado/iostream.py#L1148-L1219
def connect(
    self: _IOStreamType, address: tuple, server_hostname: str = None
) -> "Future[_IOStreamType]":
    """Connects the socket to a remote address without blocking.

    May only be called if the socket passed to the constructor was
    not previously connected.  The address parameter is in the
    same format as for `socket.connect <socket.socket.connect>` for
    the type of socket passed to the IOStream constructor,
    e.g. an ``(ip, port)`` tuple.  Hostnames are accepted here,
    but will be resolved synchronously and block the IOLoop.
    If you have a hostname instead of an IP address, the `.TCPClient`
    class is recommended instead of calling this method directly.
    `.TCPClient` will do asynchronous DNS resolution and handle
    both IPv4 and IPv6.

    If ``callback`` is specified, it will be called with no
    arguments when the connection is completed; if not this method
    returns a `.Future` (whose result after a successful
    connection will be the stream itself).

    In SSL mode, the ``server_hostname`` parameter will be used
    for certificate validation (unless disabled in the
    ``ssl_options``) and SNI (if supported; requires Python
    2.7.9+).

    Note that it is safe to call `IOStream.write
    <BaseIOStream.write>` while the connection is pending, in
    which case the data will be written as soon as the connection
    is ready.  Calling `IOStream` read methods before the socket is
    connected works on some platforms but is non-portable.

    .. versionchanged:: 4.0
        If no callback is given, returns a `.Future`.

    .. versionchanged:: 4.2
       SSL certificates are validated by default; pass
       ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a
       suitably-configured `ssl.SSLContext` to the
       `SSLIOStream` constructor to disable.

    .. versionchanged:: 6.0

       The ``callback`` argument was removed. Use the returned
       `.Future` instead.

    """
    self._connecting = True
    future = Future()  # type: Future[_IOStreamType]
    self._connect_future = typing.cast("Future[IOStream]", future)
    try:
        self.socket.connect(address)
    except socket.error as e:
        # In non-blocking mode we expect connect() to raise an
        # exception with EINPROGRESS or EWOULDBLOCK.
        #
        # On freebsd, other errors such as ECONNREFUSED may be
        # returned immediately when attempting to connect to
        # localhost, so handle them the same way as an error
        # reported later in _handle_connect.
        if (
            errno_from_exception(e) not in _ERRNO_INPROGRESS
            and errno_from_exception(e) not in _ERRNO_WOULDBLOCK
        ):
            if future is None:
                gen_log.warning(
                    "Connect error on fd %s: %s", self.socket.fileno(), e
                )
            # Genuine connect failure: tear the stream down; the future
            # resolves with the error via close().
            self.close(exc_info=e)
            return future
    # Connection is in progress; resolve the future once the socket
    # becomes writable.
    self._add_io_state(self.io_loop.WRITE)
    return future
[ "def", "connect", "(", "self", ":", "_IOStreamType", ",", "address", ":", "tuple", ",", "server_hostname", ":", "str", "=", "None", ")", "->", "\"Future[_IOStreamType]\"", ":", "self", ".", "_connecting", "=", "True", "future", "=", "Future", "(", ")", "# ...
Connects the socket to a remote address without blocking. May only be called if the socket passed to the constructor was not previously connected. The address parameter is in the same format as for `socket.connect <socket.socket.connect>` for the type of socket passed to the IOStream constructor, e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, but will be resolved synchronously and block the IOLoop. If you have a hostname instead of an IP address, the `.TCPClient` class is recommended instead of calling this method directly. `.TCPClient` will do asynchronous DNS resolution and handle both IPv4 and IPv6. If ``callback`` is specified, it will be called with no arguments when the connection is completed; if not this method returns a `.Future` (whose result after a successful connection will be the stream itself). In SSL mode, the ``server_hostname`` parameter will be used for certificate validation (unless disabled in the ``ssl_options``) and SNI (if supported; requires Python 2.7.9+). Note that it is safe to call `IOStream.write <BaseIOStream.write>` while the connection is pending, in which case the data will be written as soon as the connection is ready. Calling `IOStream` read methods before the socket is connected works on some platforms but is non-portable. .. versionchanged:: 4.0 If no callback is given, returns a `.Future`. .. versionchanged:: 4.2 SSL certificates are validated by default; pass ``ssl_options=dict(cert_reqs=ssl.CERT_NONE)`` or a suitably-configured `ssl.SSLContext` to the `SSLIOStream` constructor to disable. .. versionchanged:: 6.0 The ``callback`` argument was removed. Use the returned `.Future` instead.
[ "Connects", "the", "socket", "to", "a", "remote", "address", "without", "blocking", "." ]
python
train
gwastro/pycbc
pycbc/io/record.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/io/record.py#L1828-L1831
def spin_sy(self):
    """Returns the y-component of the spin of the secondary mass."""
    # Delegated to the conversions module, which selects between spin1y and
    # spin2y based on mass1/mass2.
    secondary_y_spin = conversions.secondary_spin(
        self.mass1, self.mass2, self.spin1y, self.spin2y
    )
    return secondary_y_spin
[ "def", "spin_sy", "(", "self", ")", ":", "return", "conversions", ".", "secondary_spin", "(", "self", ".", "mass1", ",", "self", ".", "mass2", ",", "self", ".", "spin1y", ",", "self", ".", "spin2y", ")" ]
Returns the y-component of the spin of the secondary mass.
[ "Returns", "the", "y", "-", "component", "of", "the", "spin", "of", "the", "secondary", "mass", "." ]
python
train
gawel/aiocron
aiocron/__init__.py
https://github.com/gawel/aiocron/blob/949870b2f7fe1e10e4220f3243c9d4237255d203/aiocron/__init__.py#L63-L69
def initialize(self):
    """Initialize croniter and related times"""
    # Already initialized -- nothing to do.
    if self.croniter is not None:
        return
    # Capture all the clocks at once so they describe the same instant.
    self.time = time.time()
    self.datetime = datetime.now(self.tz)
    self.loop_time = self.loop.time()
    self.croniter = croniter(self.spec, start_time=self.datetime)
[ "def", "initialize", "(", "self", ")", ":", "if", "self", ".", "croniter", "is", "None", ":", "self", ".", "time", "=", "time", ".", "time", "(", ")", "self", ".", "datetime", "=", "datetime", ".", "now", "(", "self", ".", "tz", ")", "self", ".",...
Initialize croniter and related times
[ "Initialize", "croniter", "and", "related", "times" ]
python
train
manahl/arctic
arctic/chunkstore/chunkstore.py
https://github.com/manahl/arctic/blob/57e110b6e182dbab00e7e214dc26f7d9ec47c120/arctic/chunkstore/chunkstore.py#L614-L630
def write_metadata(self, symbol, metadata):
    '''
    writes user defined metadata for the given symbol

    Parameters
    ----------
    symbol: str
        symbol for the given item in the DB
    metadata: ?
        metadata to write
    '''
    symbol_info = self._get_symbol_info(symbol)
    if not symbol_info:
        raise NoDataFoundException("Symbol does not exist.")
    # Attach the user metadata and persist the whole updated symbol document.
    symbol_info[USERMETA] = metadata
    self._symbols.replace_one({SYMBOL: symbol}, symbol_info)
[ "def", "write_metadata", "(", "self", ",", "symbol", ",", "metadata", ")", ":", "sym", "=", "self", ".", "_get_symbol_info", "(", "symbol", ")", "if", "not", "sym", ":", "raise", "NoDataFoundException", "(", "\"Symbol does not exist.\"", ")", "sym", "[", "US...
writes user defined metadata for the given symbol Parameters ---------- symbol: str symbol for the given item in the DB metadata: ? metadata to write
[ "writes", "user", "defined", "metadata", "for", "the", "given", "symbol" ]
python
train
jeremylow/pyshk
pyshk/api.py
https://github.com/jeremylow/pyshk/blob/3ab92f6706397cde7a18367266eba9e0f1ada868/pyshk/api.py#L384-L402
def save_shared_file(self, sharekey=None):
    """
    Save a SharedFile to your Shake.

    Args:
        sharekey (str): Sharekey for the file to save.

    Returns:
        SharedFile saved to your shake.

    Raises:
        Exception: if the API response cannot be parsed as a SharedFile;
            the message carries the API's error description.
    """
    endpoint = '/api/sharedfile/{sharekey}/save'.format(sharekey=sharekey)
    data = self._make_request("POST", endpoint=endpoint, data=None)
    try:
        sf = SharedFile.NewFromJSON(data)
        sf.saved = True
        return sf
    except Exception:
        # Was a bare ``except:``, which would also swallow KeyboardInterrupt
        # and SystemExit; catch only real errors before re-raising with the
        # API's error message.
        raise Exception("{0}".format(data['error']))
[ "def", "save_shared_file", "(", "self", ",", "sharekey", "=", "None", ")", ":", "endpoint", "=", "'/api/sharedfile/{sharekey}/save'", ".", "format", "(", "sharekey", "=", "sharekey", ")", "data", "=", "self", ".", "_make_request", "(", "\"POST\"", ",", "endpoi...
Save a SharedFile to your Shake. Args: sharekey (str): Sharekey for the file to save. Returns: SharedFile saved to your shake.
[ "Save", "a", "SharedFile", "to", "your", "Shake", "." ]
python
train
danielholmstrom/dictalchemy
dictalchemy/utils.py
https://github.com/danielholmstrom/dictalchemy/blob/038b8822b0ed66feef78a80b3af8f3a09f795b5a/dictalchemy/utils.py#L44-L183
def asdict(model, exclude=None, exclude_underscore=None, exclude_pk=None, follow=None, include=None, only=None, method='asdict', **kwargs): """Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. :param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. 
:returns: dict """ follow = arg_to_dict(follow) info = inspect(model) columns = [c.key for c in info.mapper.column_attrs] synonyms = [c.key for c in info.mapper.synonyms] if only: attrs = only else: exclude = exclude or [] exclude += getattr(model, 'dictalchemy_exclude', constants.default_exclude) or [] if exclude_underscore is None: exclude_underscore = getattr(model, 'dictalchemy_exclude_underscore', constants.default_exclude_underscore) if exclude_underscore: # Exclude all properties starting with underscore exclude += [k.key for k in info.mapper.attrs if k.key[0] == '_'] if exclude_pk is True: exclude += [c.key for c in info.mapper.primary_key] include = (include or []) + (getattr(model, 'dictalchemy_asdict_include', getattr(model, 'dictalchemy_include', None)) or []) attrs = [k for k in columns + synonyms + include if k not in exclude] data = dict([(k, getattr(model, k)) for k in attrs]) for (rel_key, orig_args) in follow.iteritems(): try: rel = getattr(model, rel_key) except AttributeError: raise errors.MissingRelationError(rel_key) args = copy.deepcopy(orig_args) method = args.pop('method', method) args['method'] = method args.update(copy.copy(kwargs)) if hasattr(rel, method): rel_data = getattr(rel, method)(**args) elif isinstance(rel, (list, _AssociationList)): rel_data = [] for child in rel: if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: try: rel_data.append(dict(child)) # TypeError is for non-dictable children except TypeError: rel_data.append(copy.copy(child)) elif isinstance(rel, dict): rel_data = {} for (child_key, child) in rel.iteritems(): if hasattr(child, method): rel_data[child_key] = getattr(child, method)(**args) else: try: rel_data[child_key] = dict(child) except ValueError: rel_data[child_key] = copy.copy(child) elif isinstance(rel, (AppenderMixin, Query)): rel_data = [] for child in rel.all(): if hasattr(child, method): rel_data.append(getattr(child, method)(**args)) else: rel_data.append(dict(child)) elif 
rel is None: rel_data = None else: raise errors.UnsupportedRelationError(rel_key) ins_key = args.pop('parent', None) if ins_key is None: data[rel_key] = rel_data else: if ins_key not in data: data[ins_key] = {} data[ins_key][rel_key] = rel_data return data
[ "def", "asdict", "(", "model", ",", "exclude", "=", "None", ",", "exclude_underscore", "=", "None", ",", "exclude_pk", "=", "None", ",", "follow", "=", "None", ",", "include", "=", "None", ",", "only", "=", "None", ",", "method", "=", "'asdict'", ",", ...
Get a dict from a model Using the `method` parameter makes it possible to have multiple methods that formats the result. Additional keyword arguments will be passed to all relationships that are followed. This can be used to pass on things like request or context. :param follow: List or dict of relationships that should be followed. If the parameter is a dict the value should be a dict of \ keyword arguments. Currently it follows InstrumentedList, \ MappedCollection and regular 1:1, 1:m, m:m relationships. Follow \ takes an extra argument, 'method', which is the method that \ should be used on the relation. It also takes the extra argument \ 'parent' which determines where the relationships data should be \ added in the response dict. If 'parent' is set the relationship \ will be added with it's own key as a child to `parent`. :param exclude: List of properties that should be excluded, will be \ merged with `model.dictalchemy_exclude` :param exclude_pk: If True any column that refers to the primary key will \ be excluded. :param exclude_underscore: Overides `model.dictalchemy_exclude_underscore`\ if set :param include: List of properties that should be included. Use this to \ allow python properties to be called. This list will be merged \ with `model.dictalchemy_asdict_include` or \ `model.dictalchemy_include`. :param only: List of properties that should be included. This will \ override everything else except `follow`. :param method: Name of the method that is currently called. This will be \ the default method used in 'follow' unless another method is\ set. :raises: :class:`dictalchemy.errors.MissingRelationError` \ if `follow` contains a non-existent relationship. :raises: :class:`dictalchemy.errors.UnsupportedRelationError` If `follow` \ contains an existing relationship that currently isn't supported. :returns: dict
[ "Get", "a", "dict", "from", "a", "model" ]
python
train
saltstack/salt
salt/modules/chocolatey.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/chocolatey.py#L51-L57
def _clear_context(context): ''' Clear variables stored in __context__. Run this function when a new version of chocolatey is installed. ''' for var in (x for x in __context__ if x.startswith('chocolatey.')): context.pop(var)
[ "def", "_clear_context", "(", "context", ")", ":", "for", "var", "in", "(", "x", "for", "x", "in", "__context__", "if", "x", ".", "startswith", "(", "'chocolatey.'", ")", ")", ":", "context", ".", "pop", "(", "var", ")" ]
Clear variables stored in __context__. Run this function when a new version of chocolatey is installed.
[ "Clear", "variables", "stored", "in", "__context__", ".", "Run", "this", "function", "when", "a", "new", "version", "of", "chocolatey", "is", "installed", "." ]
python
train
alex-kostirin/pyatomac
atomac/ldtpd/table.py
https://github.com/alex-kostirin/pyatomac/blob/3f46f6feb4504315eec07abb18bb41be4d257aeb/atomac/ldtpd/table.py#L250-L275
def selectlastrow(self, window_name, object_name): """ Select last row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer """ object_handle = self._get_object_handle(window_name, object_name) if not object_handle.AXEnabled: raise LdtpServerException(u"Object %s state disabled" % object_name) cell = object_handle.AXRows[-1] if not cell.AXSelected: object_handle.activate() cell.AXSelected = True else: # Selected pass return 1
[ "def", "selectlastrow", "(", "self", ",", "window_name", ",", "object_name", ")", ":", "object_handle", "=", "self", ".", "_get_object_handle", "(", "window_name", ",", "object_name", ")", "if", "not", "object_handle", ".", "AXEnabled", ":", "raise", "LdtpServer...
Select last row @param window_name: Window name to type in, either full name, LDTP's name convention, or a Unix glob. @type window_name: string @param object_name: Object name to type in, either full name, LDTP's name convention, or a Unix glob. @type object_name: string @return: 1 on success. @rtype: integer
[ "Select", "last", "row" ]
python
valid
jwodder/javaproperties
javaproperties/xmlprops.py
https://github.com/jwodder/javaproperties/blob/8b48f040305217ebeb80c98c4354691bbb01429b/javaproperties/xmlprops.py#L43-L77
def loads_xml(s, object_pairs_hook=dict): r""" Parse the contents of the string ``s`` as an XML properties document and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `loads_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Strings containing non-ASCII characters need to be encoded as bytes in Python 2 (Use either UTF-8 or UTF-16 if the XML document does not contain an encoding declaration), while Python 3 accepts both binary and text input. :param string s: the string from which to read the XML properties document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute """ elem = ET.fromstring(s) return object_pairs_hook(_fromXML(elem))
[ "def", "loads_xml", "(", "s", ",", "object_pairs_hook", "=", "dict", ")", ":", "elem", "=", "ET", ".", "fromstring", "(", "s", ")", "return", "object_pairs_hook", "(", "_fromXML", "(", "elem", ")", ")" ]
r""" Parse the contents of the string ``s`` as an XML properties document and return a `dict` of the key-value pairs. Beyond basic XML well-formedness, `loads_xml` only checks that the root element is named "``properties``" and that all of its ``<entry>`` children have ``key`` attributes. No further validation is performed; if any ``<entry>``\ s happen to contain nested tags, the behavior is undefined. By default, the key-value pairs extracted from ``s`` are combined into a `dict` with later occurrences of a key overriding previous occurrences of the same key. To change this behavior, pass a callable as the ``object_pairs_hook`` argument; it will be called with one argument, a generator of ``(key, value)`` pairs representing the key-value entries in ``s`` (including duplicates) in order of occurrence. `loads_xml` will then return the value returned by ``object_pairs_hook``. .. note:: This uses `xml.etree.ElementTree` for parsing, which does not have decent support for |unicode|_ input in Python 2. Strings containing non-ASCII characters need to be encoded as bytes in Python 2 (Use either UTF-8 or UTF-16 if the XML document does not contain an encoding declaration), while Python 3 accepts both binary and text input. :param string s: the string from which to read the XML properties document :param callable object_pairs_hook: class or function for combining the key-value pairs :rtype: `dict` or the return value of ``object_pairs_hook`` :raises ValueError: if the root of the XML tree is not a ``<properties>`` tag or an ``<entry>`` element is missing a ``key`` attribute
[ "r", "Parse", "the", "contents", "of", "the", "string", "s", "as", "an", "XML", "properties", "document", "and", "return", "a", "dict", "of", "the", "key", "-", "value", "pairs", "." ]
python
train
jupyterhub/kubespawner
kubespawner/spawner.py
https://github.com/jupyterhub/kubespawner/blob/46a4b109c5e657a4c3d5bfa8ea4731ec6564ea13/kubespawner/spawner.py#L1303-L1376
def get_pod_manifest(self): """ Make a pod manifest that will spawn current user's notebook pod. """ if callable(self.uid): uid = yield gen.maybe_future(self.uid(self)) else: uid = self.uid if callable(self.gid): gid = yield gen.maybe_future(self.gid(self)) else: gid = self.gid if callable(self.fs_gid): fs_gid = yield gen.maybe_future(self.fs_gid(self)) else: fs_gid = self.fs_gid if callable(self.supplemental_gids): supplemental_gids = yield gen.maybe_future(self.supplemental_gids(self)) else: supplemental_gids = self.supplemental_gids if self.cmd: real_cmd = self.cmd + self.get_args() else: real_cmd = None labels = self._build_pod_labels(self._expand_all(self.extra_labels)) annotations = self._build_common_annotations(self._expand_all(self.extra_annotations)) return make_pod( name=self.pod_name, cmd=real_cmd, port=self.port, image=self.image, image_pull_policy=self.image_pull_policy, image_pull_secret=self.image_pull_secrets, node_selector=self.node_selector, run_as_uid=uid, run_as_gid=gid, fs_gid=fs_gid, supplemental_gids=supplemental_gids, run_privileged=self.privileged, env=self.get_env(), volumes=self._expand_all(self.volumes), volume_mounts=self._expand_all(self.volume_mounts), working_dir=self.working_dir, labels=labels, annotations=annotations, cpu_limit=self.cpu_limit, cpu_guarantee=self.cpu_guarantee, mem_limit=self.mem_limit, mem_guarantee=self.mem_guarantee, extra_resource_limits=self.extra_resource_limits, extra_resource_guarantees=self.extra_resource_guarantees, lifecycle_hooks=self.lifecycle_hooks, init_containers=self._expand_all(self.init_containers), service_account=self.service_account, extra_container_config=self.extra_container_config, extra_pod_config=self.extra_pod_config, extra_containers=self._expand_all(self.extra_containers), scheduler_name=self.scheduler_name, tolerations=self.tolerations, node_affinity_preferred=self.node_affinity_preferred, node_affinity_required=self.node_affinity_required, 
pod_affinity_preferred=self.pod_affinity_preferred, pod_affinity_required=self.pod_affinity_required, pod_anti_affinity_preferred=self.pod_anti_affinity_preferred, pod_anti_affinity_required=self.pod_anti_affinity_required, priority_class_name=self.priority_class_name, logger=self.log, )
[ "def", "get_pod_manifest", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "uid", ")", ":", "uid", "=", "yield", "gen", ".", "maybe_future", "(", "self", ".", "uid", "(", "self", ")", ")", "else", ":", "uid", "=", "self", ".", "uid", "...
Make a pod manifest that will spawn current user's notebook pod.
[ "Make", "a", "pod", "manifest", "that", "will", "spawn", "current", "user", "s", "notebook", "pod", "." ]
python
train
haifengat/hf_ctp_py_proxy
py_ctp/quote.py
https://github.com/haifengat/hf_ctp_py_proxy/blob/c2dc6dbde45aa6b097f75380474e91510d3f5d12/py_ctp/quote.py#L62-L69
def ReqUserLogout(self): """退出接口(正常退出,不会触发OnFrontDisconnected)""" self.q.Release() # 确保隔夜或重新登录时的第1个tick不被发送到客户端 self.inst_tick.clear() self.logined = False threading.Thread(target=self.OnDisConnected, args=(self, 0)).start()
[ "def", "ReqUserLogout", "(", "self", ")", ":", "self", ".", "q", ".", "Release", "(", ")", "# 确保隔夜或重新登录时的第1个tick不被发送到客户端", "self", ".", "inst_tick", ".", "clear", "(", ")", "self", ".", "logined", "=", "False", "threading", ".", "Thread", "(", "target", ...
退出接口(正常退出,不会触发OnFrontDisconnected)
[ "退出接口", "(", "正常退出", "不会触发OnFrontDisconnected", ")" ]
python
train
pudo/googlesheets
googlesheets/sheet.py
https://github.com/pudo/googlesheets/blob/c38725d79bfe048c0519a674019ba313dfc5bfb0/googlesheets/sheet.py#L75-L81
def insert(self, row): """ Insert a new row. The row will be added to the end of the spreadsheet. Before inserting, the field names in the given row will be normalized and values with empty field names removed. """ data = self._convert_value(row) self._service.InsertRow(data, self._ss.id, self.id)
[ "def", "insert", "(", "self", ",", "row", ")", ":", "data", "=", "self", ".", "_convert_value", "(", "row", ")", "self", ".", "_service", ".", "InsertRow", "(", "data", ",", "self", ".", "_ss", ".", "id", ",", "self", ".", "id", ")" ]
Insert a new row. The row will be added to the end of the spreadsheet. Before inserting, the field names in the given row will be normalized and values with empty field names removed.
[ "Insert", "a", "new", "row", ".", "The", "row", "will", "be", "added", "to", "the", "end", "of", "the", "spreadsheet", ".", "Before", "inserting", "the", "field", "names", "in", "the", "given", "row", "will", "be", "normalized", "and", "values", "with", ...
python
train
thanethomson/statik
setup.py
https://github.com/thanethomson/statik/blob/56b1b5a2cb05a97afa81f428bfcefc833e935b8d/setup.py#L22-L49
def read_requirements(filename): """ Parse a requirements file. Accepts vcs+ links, and places the URL into `DEPENDENCY_LINKS`. :return: list of str for each package """ data = [] for line in read_file(filename): line = line.strip() if not line or line.startswith('#'): continue if '+' in line[:4]: repo_link, egg_name = line.split('#egg=') if not egg_name: raise ValueError('Unknown requirement: {0}' .format(line)) DEPENDENCY_LINKS.append(line) line = egg_name data.append(line) return data
[ "def", "read_requirements", "(", "filename", ")", ":", "data", "=", "[", "]", "for", "line", "in", "read_file", "(", "filename", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "if", "not", "line", "or", "line", ".", "startswith", "(", "'#'", ...
Parse a requirements file. Accepts vcs+ links, and places the URL into `DEPENDENCY_LINKS`. :return: list of str for each package
[ "Parse", "a", "requirements", "file", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_threshold_monitor.py#L539-L550
def threshold_monitor_hidden_threshold_monitor_Memory_actions(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") threshold_monitor_hidden = ET.SubElement(config, "threshold-monitor-hidden", xmlns="urn:brocade.com:mgmt:brocade-threshold-monitor") threshold_monitor = ET.SubElement(threshold_monitor_hidden, "threshold-monitor") Memory = ET.SubElement(threshold_monitor, "Memory") actions = ET.SubElement(Memory, "actions") actions.text = kwargs.pop('actions') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "threshold_monitor_hidden_threshold_monitor_Memory_actions", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "threshold_monitor_hidden", "=", "ET", ".", "SubElement", "(", "config", ",", "\"thresh...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
brocade/pynos
pynos/versions/base/yang/brocade_fcoe.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/base/yang/brocade_fcoe.py#L161-L171
def fcoe_fcoe_fcf_map_fcf_map_name(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") fcoe = ET.SubElement(config, "fcoe", xmlns="urn:brocade.com:mgmt:brocade-fcoe") fcoe_fcf_map = ET.SubElement(fcoe, "fcoe-fcf-map") fcf_map_name = ET.SubElement(fcoe_fcf_map, "fcf-map-name") fcf_map_name.text = kwargs.pop('fcf_map_name') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "fcoe_fcoe_fcf_map_fcf_map_name", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "fcoe", "=", "ET", ".", "SubElement", "(", "config", ",", "\"fcoe\"", ",", "xmlns", "=", "\"urn:brocade.co...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
pyviz/param
param/version.py
https://github.com/pyviz/param/blob/8f0dafa78defa883247b40635f96cc6d5c1b3481/param/version.py#L743-L764
def verify(self, string_version=None): """ Check that the version information is consistent with the VCS before doing a release. If supplied with a string version, this is also checked against the current version. Should be called from setup.py with the declared package version before releasing to PyPI. """ if string_version and string_version != str(self): raise Exception("Supplied string version does not match current version.") if self.dirty: raise Exception("Current working directory is dirty.") if self.release != self.expected_release: raise Exception("Declared release does not match current release tag.") if self.commit_count !=0: raise Exception("Please update the VCS version tag before release.") if self._expected_commit not in [None, "$Format:%h$"]: raise Exception("Declared release does not match the VCS version tag")
[ "def", "verify", "(", "self", ",", "string_version", "=", "None", ")", ":", "if", "string_version", "and", "string_version", "!=", "str", "(", "self", ")", ":", "raise", "Exception", "(", "\"Supplied string version does not match current version.\"", ")", "if", "s...
Check that the version information is consistent with the VCS before doing a release. If supplied with a string version, this is also checked against the current version. Should be called from setup.py with the declared package version before releasing to PyPI.
[ "Check", "that", "the", "version", "information", "is", "consistent", "with", "the", "VCS", "before", "doing", "a", "release", ".", "If", "supplied", "with", "a", "string", "version", "this", "is", "also", "checked", "against", "the", "current", "version", "...
python
train
pypa/pipenv
pipenv/vendor/distlib/_backport/tarfile.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/_backport/tarfile.py#L2414-L2458
def next(self): """Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available. """ self._check("ra") if self.firstmember is not None: m = self.firstmember self.firstmember = None return m # Read the next block. self.fileobj.seek(self.offset) tarinfo = None while True: try: tarinfo = self.tarinfo.fromtarfile(self) except EOFHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue except InvalidHeaderError as e: if self.ignore_zeros: self._dbg(2, "0x%X: %s" % (self.offset, e)) self.offset += BLOCKSIZE continue elif self.offset == 0: raise ReadError(str(e)) except EmptyHeaderError: if self.offset == 0: raise ReadError("empty file") except TruncatedHeaderError as e: if self.offset == 0: raise ReadError(str(e)) except SubsequentHeaderError as e: raise ReadError(str(e)) break if tarinfo is not None: self.members.append(tarinfo) else: self._loaded = True return tarinfo
[ "def", "next", "(", "self", ")", ":", "self", ".", "_check", "(", "\"ra\"", ")", "if", "self", ".", "firstmember", "is", "not", "None", ":", "m", "=", "self", ".", "firstmember", "self", ".", "firstmember", "=", "None", "return", "m", "# Read the next ...
Return the next member of the archive as a TarInfo object, when TarFile is opened for reading. Return None if there is no more available.
[ "Return", "the", "next", "member", "of", "the", "archive", "as", "a", "TarInfo", "object", "when", "TarFile", "is", "opened", "for", "reading", ".", "Return", "None", "if", "there", "is", "no", "more", "available", "." ]
python
train
clach04/python-tuya
pytuya/__init__.py
https://github.com/clach04/python-tuya/blob/7b89d38c56f6e25700e2a333000d25bc8d923622/pytuya/__init__.py#L316-L336
def set_timer(self, num_secs): """ Set a timer. Args: num_secs(int): Number of seconds """ # FIXME / TODO support schemas? Accept timer id number as parameter? # Dumb heuristic; Query status, pick last device id as that is probably the timer status = self.status() devices = status['dps'] devices_numbers = list(devices.keys()) devices_numbers.sort() dps_id = devices_numbers[-1] payload = self.generate_payload(SET, {dps_id:num_secs}) data = self._send_receive(payload) log.debug('set_timer received data=%r', data) return data
[ "def", "set_timer", "(", "self", ",", "num_secs", ")", ":", "# FIXME / TODO support schemas? Accept timer id number as parameter?", "# Dumb heuristic; Query status, pick last device id as that is probably the timer", "status", "=", "self", ".", "status", "(", ")", "devices", "=",...
Set a timer. Args: num_secs(int): Number of seconds
[ "Set", "a", "timer", ".", "Args", ":", "num_secs", "(", "int", ")", ":", "Number", "of", "seconds" ]
python
train
ic-labs/django-icekit
icekit_events/forms.py
https://github.com/ic-labs/django-icekit/blob/c507ea5b1864303732c53ad7c5800571fca5fa94/icekit_events/forms.py#L99-L125
def render(self, name, value, attrs=None): """ Render the ``icekit_events/recurrence_rule_widget/render.html`` template with the following context: rendered_widgets The rendered widgets. id The ``id`` attribute from the ``attrs`` keyword argument. recurrence_rules A JSON object mapping recurrence rules to their primary keys. The default template adds JavaScript event handlers that update the ``Textarea`` and ``Select`` widgets when they are updated. """ rendered_widgets = super(RecurrenceRuleWidget, self).render( name, value, attrs) template = loader.get_template( 'icekit_events/recurrence_rule_widget/render.html') recurrence_rules = json.dumps(dict( self.queryset.values_list('pk', 'recurrence_rule'))) context = Context({ 'rendered_widgets': rendered_widgets, 'id': attrs['id'], 'recurrence_rules': recurrence_rules, }) return template.render(context)
[ "def", "render", "(", "self", ",", "name", ",", "value", ",", "attrs", "=", "None", ")", ":", "rendered_widgets", "=", "super", "(", "RecurrenceRuleWidget", ",", "self", ")", ".", "render", "(", "name", ",", "value", ",", "attrs", ")", "template", "=",...
Render the ``icekit_events/recurrence_rule_widget/render.html`` template with the following context: rendered_widgets The rendered widgets. id The ``id`` attribute from the ``attrs`` keyword argument. recurrence_rules A JSON object mapping recurrence rules to their primary keys. The default template adds JavaScript event handlers that update the ``Textarea`` and ``Select`` widgets when they are updated.
[ "Render", "the", "icekit_events", "/", "recurrence_rule_widget", "/", "render", ".", "html", "template", "with", "the", "following", "context", ":" ]
python
train
tomduck/pandoc-fignos
pandoc_fignos.py
https://github.com/tomduck/pandoc-fignos/blob/44776d885b8101d9d6aeb6845b17ad1625e88e6f/pandoc_fignos.py#L121-L201
def _process_figure(value, fmt): """Processes the figure. Returns a dict containing figure properties.""" # pylint: disable=global-statement global Nreferences # Global references counter global has_unnumbered_figures # Flags unnumbered figures were found global cursec # Current section # Parse the image attrs, caption = value[0]['c'][:2] # Initialize the return value fig = {'is_unnumbered': False, 'is_unreferenceable': False, 'is_tagged': False, 'attrs': attrs} # Bail out if the label does not conform if not LABEL_PATTERN.match(attrs[0]): has_unnumbered_figures = True fig['is_unnumbered'] = True fig['is_unreferenceable'] = True return fig # Process unreferenceable figures if attrs[0] == 'fig:': # Make up a unique description attrs[0] = attrs[0] + str(uuid.uuid4()) fig['is_unreferenceable'] = True unreferenceable.append(attrs[0]) # For html, hard-code in the section numbers as tags kvs = PandocAttributes(attrs, 'pandoc').kvs if numbersections and fmt in ['html', 'html5'] and 'tag' not in kvs: if kvs['secno'] != cursec: cursec = kvs['secno'] Nreferences = 1 kvs['tag'] = cursec + '.' 
+ str(Nreferences) Nreferences += 1 # Save to the global references tracker fig['is_tagged'] = 'tag' in kvs if fig['is_tagged']: # Remove any surrounding quotes if kvs['tag'][0] == '"' and kvs['tag'][-1] == '"': kvs['tag'] = kvs['tag'].strip('"') elif kvs['tag'][0] == "'" and kvs['tag'][-1] == "'": kvs['tag'] = kvs['tag'].strip("'") references[attrs[0]] = kvs['tag'] else: Nreferences += 1 references[attrs[0]] = Nreferences # Adjust caption depending on the output format if fmt in ['latex', 'beamer']: # Append a \label if this is referenceable if not fig['is_unreferenceable']: value[0]['c'][1] += [RawInline('tex', r'\label{%s}'%attrs[0])] else: # Hard-code in the caption name and number/tag if isinstance(references[attrs[0]], int): # Numbered reference value[0]['c'][1] = [RawInline('html', r'<span>'), Str(captionname), Space(), Str('%d:'%references[attrs[0]]), RawInline('html', r'</span>')] \ if fmt in ['html', 'html5'] else \ [Str(captionname), Space(), Str('%d:'%references[attrs[0]])] value[0]['c'][1] += [Space()] + list(caption) else: # Tagged reference assert isinstance(references[attrs[0]], STRTYPES) text = references[attrs[0]] if text.startswith('$') and text.endswith('$'): # Math math = text.replace(' ', r'\ ')[1:-1] els = [Math({"t":"InlineMath", "c":[]}, math), Str(':')] else: # Text els = [Str(text+':')] value[0]['c'][1] = \ [RawInline('html', r'<span>'), Str(captionname), Space()] + \ els + [RawInline('html', r'</span>')] \ if fmt in ['html', 'html5'] else \ [Str(captionname), Space()] + els value[0]['c'][1] += [Space()] + list(caption) return fig
[ "def", "_process_figure", "(", "value", ",", "fmt", ")", ":", "# pylint: disable=global-statement", "global", "Nreferences", "# Global references counter", "global", "has_unnumbered_figures", "# Flags unnumbered figures were found", "global", "cursec", "# Current section", "# Par...
Processes the figure. Returns a dict containing figure properties.
[ "Processes", "the", "figure", ".", "Returns", "a", "dict", "containing", "figure", "properties", "." ]
python
train
pytorch/text
torchtext/utils.py
https://github.com/pytorch/text/blob/26bfce6869dc704f1d86792f9a681d453d7e7bb8/torchtext/utils.py#L60-L72
def unicode_csv_reader(unicode_csv_data, **kwargs): """Since the standard csv library does not handle unicode in Python 2, we need a wrapper. Borrowed and slightly modified from the Python docs: https://docs.python.org/2/library/csv.html#csv-examples""" if six.PY2: # csv.py doesn't do Unicode; encode temporarily as UTF-8: csv_reader = csv.reader(utf_8_encoder(unicode_csv_data), **kwargs) for row in csv_reader: # decode UTF-8 back to Unicode, cell by cell: yield [cell.decode('utf-8') for cell in row] else: for line in csv.reader(unicode_csv_data, **kwargs): yield line
[ "def", "unicode_csv_reader", "(", "unicode_csv_data", ",", "*", "*", "kwargs", ")", ":", "if", "six", ".", "PY2", ":", "# csv.py doesn't do Unicode; encode temporarily as UTF-8:", "csv_reader", "=", "csv", ".", "reader", "(", "utf_8_encoder", "(", "unicode_csv_data", ...
Since the standard csv library does not handle unicode in Python 2, we need a wrapper. Borrowed and slightly modified from the Python docs: https://docs.python.org/2/library/csv.html#csv-examples
[ "Since", "the", "standard", "csv", "library", "does", "not", "handle", "unicode", "in", "Python", "2", "we", "need", "a", "wrapper", ".", "Borrowed", "and", "slightly", "modified", "from", "the", "Python", "docs", ":", "https", ":", "//", "docs", ".", "p...
python
train
ozgurgunes/django-manifest
manifest/accounts/utils.py
https://github.com/ozgurgunes/django-manifest/blob/9873bbf2a475b76284ad7e36b2b26c92131e72dd/manifest/accounts/utils.py#L54-L77
def login_redirect(redirect=None, user=None): """ Redirect user after successful sign in. First looks for a ``requested_redirect``. If not supplied will fall-back to the user specific account page. If all fails, will fall-back to the standard Django ``LOGIN_REDIRECT_URL`` setting. Returns a string defining the URI to go next. :param redirect: A value normally supplied by ``next`` form field. Gets preference before the default view which requires the user. :param user: A ``User`` object specifying the user who has just logged in. :return: String containing the URI to redirect to. """ if redirect: return redirect elif user is not None: return defaults.ACCOUNTS_LOGIN_REDIRECT_URL % \ {'username': user.username} else: return settings.LOGIN_REDIRECT_URL
[ "def", "login_redirect", "(", "redirect", "=", "None", ",", "user", "=", "None", ")", ":", "if", "redirect", ":", "return", "redirect", "elif", "user", "is", "not", "None", ":", "return", "defaults", ".", "ACCOUNTS_LOGIN_REDIRECT_URL", "%", "{", "'username'"...
Redirect user after successful sign in. First looks for a ``requested_redirect``. If not supplied will fall-back to the user specific account page. If all fails, will fall-back to the standard Django ``LOGIN_REDIRECT_URL`` setting. Returns a string defining the URI to go next. :param redirect: A value normally supplied by ``next`` form field. Gets preference before the default view which requires the user. :param user: A ``User`` object specifying the user who has just logged in. :return: String containing the URI to redirect to.
[ "Redirect", "user", "after", "successful", "sign", "in", "." ]
python
train
tensorflow/tensorboard
tensorboard/plugins/graph/keras_util.py
https://github.com/tensorflow/tensorboard/blob/8e5f497b48e40f2a774f85416b8a35ac0693c35e/tensorboard/plugins/graph/keras_util.py#L180-L239
def keras_model_to_graph_def(keras_layer): """Returns a GraphDef representation of the Keras model in a dict form. Note that it only supports models that implemented to_json(). Args: keras_layer: A dict from Keras model.to_json(). Returns: A GraphDef representation of the layers in the model. """ input_to_layer = {} model_name_to_output = {} g = GraphDef() # Sequential model layers do not have a field "inbound_nodes" but # instead are defined implicitly via order of layers. prev_node_name = None for (name_scope, layer) in _walk_layers(keras_layer): if _is_model(layer): (input_to_layer, model_name_to_output, prev_node_name) = _update_dicts( name_scope, layer, input_to_layer, model_name_to_output, prev_node_name) continue layer_config = layer.get('config') node_name = _scoped_name(name_scope, layer_config.get('name')) node_def = g.node.add() node_def.name = node_name if layer.get('class_name') is not None: keras_cls_name = layer.get('class_name').encode('ascii') node_def.attr['keras_class'].s = keras_cls_name if layer_config.get('dtype') is not None: tf_dtype = dtypes.as_dtype(layer_config.get('dtype')) node_def.attr['dtype'].type = tf_dtype.as_datatype_enum if layer.get('inbound_nodes') is not None: for maybe_inbound_node in layer.get('inbound_nodes'): inbound_nodes = _norm_to_list_of_layers(maybe_inbound_node) for [name, size, index, _] in inbound_nodes: inbound_name = _scoped_name(name_scope, name) # An input to a layer can be output from a model. In that case, the name # of inbound_nodes to a layer is a name of a model. Remap the name of the # model to output layer of the model. Also, since there can be multiple # outputs in a model, make sure we pick the right output_layer from the model. 
inbound_node_names = model_name_to_output.get( inbound_name, [inbound_name]) node_def.input.append(inbound_node_names[index]) elif prev_node_name is not None: node_def.input.append(prev_node_name) if node_name in input_to_layer: node_def.input.append(input_to_layer.get(node_name)) prev_node_name = node_def.name return g
[ "def", "keras_model_to_graph_def", "(", "keras_layer", ")", ":", "input_to_layer", "=", "{", "}", "model_name_to_output", "=", "{", "}", "g", "=", "GraphDef", "(", ")", "# Sequential model layers do not have a field \"inbound_nodes\" but", "# instead are defined implicitly vi...
Returns a GraphDef representation of the Keras model in a dict form. Note that it only supports models that implemented to_json(). Args: keras_layer: A dict from Keras model.to_json(). Returns: A GraphDef representation of the layers in the model.
[ "Returns", "a", "GraphDef", "representation", "of", "the", "Keras", "model", "in", "a", "dict", "form", "." ]
python
train
Erotemic/utool
utool/util_dev.py
https://github.com/Erotemic/utool/blob/3b27e1f4e6e6fb23cd8744af7b7195b57d99e03a/utool/util_dev.py#L2926-L2932
def map_column(self, keys, func): """ Args: keys (list or str): the column name(s) to apply the `func` to func (callable): applied to each element in the specified columns """ return [[func(v) for v in self[key]] for key in keys]
[ "def", "map_column", "(", "self", ",", "keys", ",", "func", ")", ":", "return", "[", "[", "func", "(", "v", ")", "for", "v", "in", "self", "[", "key", "]", "]", "for", "key", "in", "keys", "]" ]
Args: keys (list or str): the column name(s) to apply the `func` to func (callable): applied to each element in the specified columns
[ "Args", ":", "keys", "(", "list", "or", "str", ")", ":", "the", "column", "name", "(", "s", ")", "to", "apply", "the", "func", "to", "func", "(", "callable", ")", ":", "applied", "to", "each", "element", "in", "the", "specified", "columns" ]
python
train
fossasia/knittingpattern
knittingpattern/Mesh.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/Mesh.py#L304-L316
def connect_to(self, other_mesh): """Create a connection to an other mesh. .. warning:: Both meshes need to be disconnected and one needs to be a consumed and the other a produced mesh. You can check if a connection is possible using :meth:`can_connect_to`. .. seealso:: :meth:`is_consumed`, :meth:`is_produced`, :meth:`can_connect_to` """ other_mesh.disconnect() self.disconnect() self._connect_to(other_mesh)
[ "def", "connect_to", "(", "self", ",", "other_mesh", ")", ":", "other_mesh", ".", "disconnect", "(", ")", "self", ".", "disconnect", "(", ")", "self", ".", "_connect_to", "(", "other_mesh", ")" ]
Create a connection to an other mesh. .. warning:: Both meshes need to be disconnected and one needs to be a consumed and the other a produced mesh. You can check if a connection is possible using :meth:`can_connect_to`. .. seealso:: :meth:`is_consumed`, :meth:`is_produced`, :meth:`can_connect_to`
[ "Create", "a", "connection", "to", "an", "other", "mesh", "." ]
python
valid
pypa/pipenv
pipenv/vendor/distlib/util.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/distlib/util.py#L988-L1006
def add(self, event, subscriber, append=True): """ Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event. """ subs = self._subscribers if event not in subs: subs[event] = deque([subscriber]) else: sq = subs[event] if append: sq.append(subscriber) else: sq.appendleft(subscriber)
[ "def", "add", "(", "self", ",", "event", ",", "subscriber", ",", "append", "=", "True", ")", ":", "subs", "=", "self", ".", "_subscribers", "if", "event", "not", "in", "subs", ":", "subs", "[", "event", "]", "=", "deque", "(", "[", "subscriber", "]...
Add a subscriber for an event. :param event: The name of an event. :param subscriber: The subscriber to be added (and called when the event is published). :param append: Whether to append or prepend the subscriber to an existing subscriber list for the event.
[ "Add", "a", "subscriber", "for", "an", "event", "." ]
python
train
hvac/hvac
hvac/api/secrets_engines/transit.py
https://github.com/hvac/hvac/blob/cce5b86889193f622c2a72a4a1b7e1c9c8aff1ce/hvac/api/secrets_engines/transit.py#L334-L374
def decrypt_data(self, name, ciphertext, context="", nonce="", batch_input=None, mount_point=DEFAULT_MOUNT_POINT): """Decrypt the provided ciphertext using the named key. Supported methods: POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json :param name: Specifies the name of the encryption key to decrypt against. This is specified as part of the URL. :type name: str | unicode :param ciphertext: the ciphertext to decrypt. :type ciphertext: str | unicode :param context: Specifies the base64 encoded context for key derivation. This is required if key derivation is enabled. :type context: str | unicode :param nonce: Specifies a base64 encoded nonce value used during encryption. Must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+. :type nonce: str | unicode :param batch_input: Specifies a list of items to be decrypted in a single batch. When this parameter is set, if the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. Format for the input goes like this: [dict(context="b64_context", ciphertext="b64_plaintext"), ...] :type batch_input: List[dict] :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response """ params = { 'ciphertext': ciphertext, 'context': context, 'nonce': nonce, 'batch_input': batch_input, } api_path = '/v1/{mount_point}/decrypt/{name}'.format( mount_point=mount_point, name=name, ) response = self._adapter.post( url=api_path, json=params, ) return response.json()
[ "def", "decrypt_data", "(", "self", ",", "name", ",", "ciphertext", ",", "context", "=", "\"\"", ",", "nonce", "=", "\"\"", ",", "batch_input", "=", "None", ",", "mount_point", "=", "DEFAULT_MOUNT_POINT", ")", ":", "params", "=", "{", "'ciphertext'", ":", ...
Decrypt the provided ciphertext using the named key. Supported methods: POST: /{mount_point}/decrypt/{name}. Produces: 200 application/json :param name: Specifies the name of the encryption key to decrypt against. This is specified as part of the URL. :type name: str | unicode :param ciphertext: the ciphertext to decrypt. :type ciphertext: str | unicode :param context: Specifies the base64 encoded context for key derivation. This is required if key derivation is enabled. :type context: str | unicode :param nonce: Specifies a base64 encoded nonce value used during encryption. Must be provided if convergent encryption is enabled for this key and the key was generated with Vault 0.6.1. Not required for keys created in 0.6.2+. :type nonce: str | unicode :param batch_input: Specifies a list of items to be decrypted in a single batch. When this parameter is set, if the parameters 'ciphertext', 'context' and 'nonce' are also set, they will be ignored. Format for the input goes like this: [dict(context="b64_context", ciphertext="b64_plaintext"), ...] :type batch_input: List[dict] :param mount_point: The "path" the method/backend was mounted on. :type mount_point: str | unicode :return: The JSON response of the request. :rtype: requests.Response
[ "Decrypt", "the", "provided", "ciphertext", "using", "the", "named", "key", "." ]
python
train
nitmir/django-cas-server
cas_server/views.py
https://github.com/nitmir/django-cas-server/blob/d106181b94c444f1946269da5c20f6c904840ad3/cas_server/views.py#L606-L617
def get(self, request, *args, **kwargs): """ method called on GET request on this view :param django.http.HttpRequest request: The current request object """ # initialize class parameters self.init_get(request) # process the GET request self.process_get() # call the GET/POST common part return self.common()
[ "def", "get", "(", "self", ",", "request", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "# initialize class parameters", "self", ".", "init_get", "(", "request", ")", "# process the GET request", "self", ".", "process_get", "(", ")", "# call the GET/PO...
method called on GET request on this view :param django.http.HttpRequest request: The current request object
[ "method", "called", "on", "GET", "request", "on", "this", "view" ]
python
train
googleapis/google-cloud-python
bigquery/google/cloud/bigquery/client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigquery/google/cloud/bigquery/client.py#L1716-L1798
def insert_rows_json( self, table, json_rows, row_ids=None, skip_invalid_rows=None, ignore_unknown_values=None, template_suffix=None, retry=DEFAULT_RETRY, ): """Insert rows into a table without applying local type conversions. See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll table (Union[ \ :class:`~google.cloud.bigquery.table.Table` \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): The destination table for the row data, or a reference to it. json_rows (Sequence[dict]): Row data to be inserted. Keys must match the table schema fields and values must be JSON-compatible representations. row_ids (Sequence[str]): (Optional) Unique ids, one per row being inserted. If omitted, unique IDs are created. skip_invalid_rows (bool): (Optional) Insert all valid rows of a request, even if invalid rows exist. The default value is False, which causes the entire request to fail if any invalid rows exist. ignore_unknown_values (bool): (Optional) Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is False, which treats unknown values as errors. template_suffix (str): (Optional) treat ``name`` as a template table and provide a suffix. BigQuery will create the table ``<name> + <template_suffix>`` based on the schema of the template table. See https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables retry (:class:`google.api_core.retry.Retry`): (Optional) How to retry the RPC. Returns: Sequence[Mappings]: One mapping per row with insert errors: the "index" key identifies the row, and the "errors" key contains a list of the mappings describing one or more problems with the row. """ # Convert table to just a reference because unlike insert_rows, # insert_rows_json doesn't need the table schema. It's not doing any # type conversions. 
table = _table_arg_to_table_ref(table, default_project=self.project) rows_info = [] data = {"rows": rows_info} for index, row in enumerate(json_rows): info = {"json": row} if row_ids is not None: info["insertId"] = row_ids[index] else: info["insertId"] = str(uuid.uuid4()) rows_info.append(info) if skip_invalid_rows is not None: data["skipInvalidRows"] = skip_invalid_rows if ignore_unknown_values is not None: data["ignoreUnknownValues"] = ignore_unknown_values if template_suffix is not None: data["templateSuffix"] = template_suffix # We can always retry, because every row has an insert ID. response = self._call_api( retry, method="POST", path="%s/insertAll" % table.path, data=data ) errors = [] for error in response.get("insertErrors", ()): errors.append({"index": int(error["index"]), "errors": error["errors"]}) return errors
[ "def", "insert_rows_json", "(", "self", ",", "table", ",", "json_rows", ",", "row_ids", "=", "None", ",", "skip_invalid_rows", "=", "None", ",", "ignore_unknown_values", "=", "None", ",", "template_suffix", "=", "None", ",", "retry", "=", "DEFAULT_RETRY", ",",...
Insert rows into a table without applying local type conversions. See https://cloud.google.com/bigquery/docs/reference/rest/v2/tabledata/insertAll table (Union[ \ :class:`~google.cloud.bigquery.table.Table` \ :class:`~google.cloud.bigquery.table.TableReference`, \ str, \ ]): The destination table for the row data, or a reference to it. json_rows (Sequence[dict]): Row data to be inserted. Keys must match the table schema fields and values must be JSON-compatible representations. row_ids (Sequence[str]): (Optional) Unique ids, one per row being inserted. If omitted, unique IDs are created. skip_invalid_rows (bool): (Optional) Insert all valid rows of a request, even if invalid rows exist. The default value is False, which causes the entire request to fail if any invalid rows exist. ignore_unknown_values (bool): (Optional) Accept rows that contain values that do not match the schema. The unknown values are ignored. Default is False, which treats unknown values as errors. template_suffix (str): (Optional) treat ``name`` as a template table and provide a suffix. BigQuery will create the table ``<name> + <template_suffix>`` based on the schema of the template table. See https://cloud.google.com/bigquery/streaming-data-into-bigquery#template-tables retry (:class:`google.api_core.retry.Retry`): (Optional) How to retry the RPC. Returns: Sequence[Mappings]: One mapping per row with insert errors: the "index" key identifies the row, and the "errors" key contains a list of the mappings describing one or more problems with the row.
[ "Insert", "rows", "into", "a", "table", "without", "applying", "local", "type", "conversions", "." ]
python
train
elastic/elasticsearch-py
elasticsearch/client/xpack/rollup.py
https://github.com/elastic/elasticsearch-py/blob/2aab285c8f506f3863cbdaba3c90a685c510ba00/elasticsearch/client/xpack/rollup.py#L19-L28
def get_jobs(self, id=None, params=None): """ `<>`_ :arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs """ return self.transport.perform_request( "GET", _make_path("_rollup", "job", id), params=params )
[ "def", "get_jobs", "(", "self", ",", "id", "=", "None", ",", "params", "=", "None", ")", ":", "return", "self", ".", "transport", ".", "perform_request", "(", "\"GET\"", ",", "_make_path", "(", "\"_rollup\"", ",", "\"job\"", ",", "id", ")", ",", "param...
`<>`_ :arg id: The ID of the job(s) to fetch. Accepts glob patterns, or left blank for all jobs
[ "<", ">", "_" ]
python
train
IceflowRE/unidown
unidown/core/manager.py
https://github.com/IceflowRE/unidown/blob/2a6f82ab780bb825668bfc55b67c11c4f72ec05c/unidown/core/manager.py#L163-L178
def check_update(): """ Check for app updates and print/log them. """ logging.info('Check for app updates.') try: update = updater.check_for_app_updates() except Exception: logging.exception('Check for updates failed.') return if update: print("!!! UPDATE AVAILABLE !!!\n" "" + static_data.PROJECT_URL + "\n\n") logging.info("Update available: " + static_data.PROJECT_URL) else: logging.info("No update available.")
[ "def", "check_update", "(", ")", ":", "logging", ".", "info", "(", "'Check for app updates.'", ")", "try", ":", "update", "=", "updater", ".", "check_for_app_updates", "(", ")", "except", "Exception", ":", "logging", ".", "exception", "(", "'Check for updates fa...
Check for app updates and print/log them.
[ "Check", "for", "app", "updates", "and", "print", "/", "log", "them", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/ImSim/image_model.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/ImSim/image_model.py#L163-L175
def linear_response_matrix(self, kwargs_lens=None, kwargs_source=None, kwargs_lens_light=None, kwargs_ps=None): """ computes the linear response matrix (m x n), with n beeing the data size and m being the coefficients :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return: """ A = self._response_matrix(self.ImageNumerics.ra_grid_ray_shooting, self.ImageNumerics.dec_grid_ray_shooting, kwargs_lens, kwargs_source, kwargs_lens_light, kwargs_ps, self.ImageNumerics.mask) return A
[ "def", "linear_response_matrix", "(", "self", ",", "kwargs_lens", "=", "None", ",", "kwargs_source", "=", "None", ",", "kwargs_lens_light", "=", "None", ",", "kwargs_ps", "=", "None", ")", ":", "A", "=", "self", ".", "_response_matrix", "(", "self", ".", "...
computes the linear response matrix (m x n), with n beeing the data size and m being the coefficients :param kwargs_lens: :param kwargs_source: :param kwargs_lens_light: :param kwargs_ps: :return:
[ "computes", "the", "linear", "response", "matrix", "(", "m", "x", "n", ")", "with", "n", "beeing", "the", "data", "size", "and", "m", "being", "the", "coefficients" ]
python
train
BD2KGenomics/toil-scripts
src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py
https://github.com/BD2KGenomics/toil-scripts/blob/f878d863defcdccaabb7fe06f991451b7a198fb7/src/toil_scripts/exome_variant_pipeline/exome_variant_pipeline.py#L46-L57
def reference_preprocessing(job, samples, config): """ Spawn the jobs that create index and dict file for reference :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information """ job.fileStore.logToMaster('Processed reference files') config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv() config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv() job.addFollowOnJobFn(map_job, download_sample, samples, config)
[ "def", "reference_preprocessing", "(", "job", ",", "samples", ",", "config", ")", ":", "job", ".", "fileStore", ".", "logToMaster", "(", "'Processed reference files'", ")", "config", ".", "fai", "=", "job", ".", "addChildJobFn", "(", "run_samtools_faidx", ",", ...
Spawn the jobs that create index and dict file for reference :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information
[ "Spawn", "the", "jobs", "that", "create", "index", "and", "dict", "file", "for", "reference" ]
python
train
sosreport/sos
sos/plugins/foreman.py
https://github.com/sosreport/sos/blob/2ebc04da53dc871c8dd5243567afa4f8592dca29/sos/plugins/foreman.py#L163-L177
def build_query_cmd(self, query, csv=False): """ Builds the command needed to invoke the pgsql query as the postgres user. The query requires significant quoting work to satisfy both the shell and postgres parsing requirements. Note that this will generate a large amount of quoting in sos logs referencing the command being run """ _cmd = "su postgres -c %s" if not csv: _dbcmd = "psql foreman -c %s" else: _dbcmd = "psql foreman -A -F , -X -c %s" dbq = _dbcmd % quote(query) return _cmd % quote(dbq)
[ "def", "build_query_cmd", "(", "self", ",", "query", ",", "csv", "=", "False", ")", ":", "_cmd", "=", "\"su postgres -c %s\"", "if", "not", "csv", ":", "_dbcmd", "=", "\"psql foreman -c %s\"", "else", ":", "_dbcmd", "=", "\"psql foreman -A -F , -X -c %s\"", "dbq...
Builds the command needed to invoke the pgsql query as the postgres user. The query requires significant quoting work to satisfy both the shell and postgres parsing requirements. Note that this will generate a large amount of quoting in sos logs referencing the command being run
[ "Builds", "the", "command", "needed", "to", "invoke", "the", "pgsql", "query", "as", "the", "postgres", "user", ".", "The", "query", "requires", "significant", "quoting", "work", "to", "satisfy", "both", "the", "shell", "and", "postgres", "parsing", "requireme...
python
train
titusjan/argos
argos/qt/registry.py
https://github.com/titusjan/argos/blob/20d0a3cae26c36ea789a5d219c02ca7df21279dd/argos/qt/registry.py#L296-L305
def removeItem(self, regItem): """ Removes a ClassRegItem object to the registry. Will raise a KeyError if the regItem is not registered. """ check_class(regItem, ClassRegItem) logger.info("Removing {!r} containing {}".format(regItem.identifier, regItem.fullClassName)) del self._index[regItem.identifier] idx = self._items.index(regItem) del self._items[idx]
[ "def", "removeItem", "(", "self", ",", "regItem", ")", ":", "check_class", "(", "regItem", ",", "ClassRegItem", ")", "logger", ".", "info", "(", "\"Removing {!r} containing {}\"", ".", "format", "(", "regItem", ".", "identifier", ",", "regItem", ".", "fullClas...
Removes a ClassRegItem object to the registry. Will raise a KeyError if the regItem is not registered.
[ "Removes", "a", "ClassRegItem", "object", "to", "the", "registry", ".", "Will", "raise", "a", "KeyError", "if", "the", "regItem", "is", "not", "registered", "." ]
python
train
maartenbreddels/ipyvolume
ipyvolume/pylab.py
https://github.com/maartenbreddels/ipyvolume/blob/e68b72852b61276f8e6793bc8811f5b2432a155f/ipyvolume/pylab.py#L1206-L1263
def use(style): """Set the style of the current figure/visualization. :param style: matplotlib style name, or dict with values, or a sequence of these, where the last value overrides previous :return: """ def valid(value): # checks if json'able return isinstance(value, six.string_types) def translate(mplstyle): style = {} mapping = [ ['figure.facecolor', 'background-color'], ['xtick.color', 'axes.x.color'], # TODO: is this the right thing? ['xtick.color', 'axes.z.color'], # map x to z as well ['ytick.color', 'axes.y.color'], ['axes.labelcolor', 'axes.label.color'], ['text.color', 'color'], ['axes.edgecolor', 'axes.color'], ] for from_name, to_name in mapping: if from_name in mplstyle: value = mplstyle[from_name] if "color" in from_name: try: # threejs doesn't like a color like '.13', so try to convert to proper format value = float(value) * 255 value = "rgb(%d, %d, %d)" % (value, value, value) except: pass utils.nested_setitem(style, to_name, value) return style if isinstance(style, six.string_types + (dict,)): styles = [style] else: styles = style fig = gcf() totalstyle = utils.dict_deep_update({}, fig.style) for style in styles: if isinstance(style, six.string_types): if hasattr(ipyvolume.styles, style): style = getattr(ipyvolume.styles, style) else: # lets see if we can copy matplotlib's style # we assume now it's a matplotlib style, get all properties that we understand cleaned_style = { key: value for key, value in dict(matplotlib.style.library[style]).items() if valid(value) } style = translate(cleaned_style) # totalstyle.update(cleaned_style) else: # otherwise assume it's a dict pass totalstyle = utils.dict_deep_update(totalstyle, style) fig = gcf() fig.style = totalstyle
[ "def", "use", "(", "style", ")", ":", "def", "valid", "(", "value", ")", ":", "# checks if json'able", "return", "isinstance", "(", "value", ",", "six", ".", "string_types", ")", "def", "translate", "(", "mplstyle", ")", ":", "style", "=", "{", "}", "m...
Set the style of the current figure/visualization. :param style: matplotlib style name, or dict with values, or a sequence of these, where the last value overrides previous :return:
[ "Set", "the", "style", "of", "the", "current", "figure", "/", "visualization", "." ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_widget.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/qt/console/completion_widget.py#L83-L101
def show_items(self, cursor, items): """ Shows the completion widget with 'items' at the position specified by 'cursor'. """ text_edit = self._text_edit point = text_edit.cursorRect(cursor).bottomRight() point = text_edit.mapToGlobal(point) height = self.sizeHint().height() screen_rect = QtGui.QApplication.desktop().availableGeometry(self) if screen_rect.size().height() - point.y() - height < 0: point = text_edit.mapToGlobal(text_edit.cursorRect().topRight()) point.setY(point.y() - height) self.move(point) self._start_position = cursor.position() self.clear() self.addItems(items) self.setCurrentRow(0) self.show()
[ "def", "show_items", "(", "self", ",", "cursor", ",", "items", ")", ":", "text_edit", "=", "self", ".", "_text_edit", "point", "=", "text_edit", ".", "cursorRect", "(", "cursor", ")", ".", "bottomRight", "(", ")", "point", "=", "text_edit", ".", "mapToGl...
Shows the completion widget with 'items' at the position specified by 'cursor'.
[ "Shows", "the", "completion", "widget", "with", "items", "at", "the", "position", "specified", "by", "cursor", "." ]
python
test
DLR-RM/RAFCON
source/rafcon/gui/controllers/state_editor/data_flows.py
https://github.com/DLR-RM/RAFCON/blob/24942ef1a904531f49ab8830a1dbb604441be498/source/rafcon/gui/controllers/state_editor/data_flows.py#L833-L859
def register_view(self, view): """Called when the View was registered Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application """ super(StateDataFlowsEditorController, self).register_view(view) view['add_d_button'].connect('clicked', self.df_list_ctrl.on_add) view['remove_d_button'].connect('clicked', self.df_list_ctrl.on_remove) view['connected_to_d_checkbutton'].connect('toggled', self.toggled_button, 'data_flows_external') view['internal_d_checkbutton'].connect('toggled', self.toggled_button, 'data_flows_internal') if isinstance(self.model.state, LibraryState): view['internal_d_checkbutton'].set_sensitive(False) view['internal_d_checkbutton'].set_active(False) if self.model.parent is not None and isinstance(self.model.parent.state, LibraryState) or \ self.model.state.get_next_upper_library_root_state(): view['add_d_button'].set_sensitive(False) view['remove_d_button'].set_sensitive(False) if self.model.state.is_root_state: self.df_list_ctrl.view_dict['data_flows_external'] = False view['connected_to_d_checkbutton'].set_active(False) if not isinstance(self.model, ContainerStateModel): self.df_list_ctrl.view_dict['data_flows_internal'] = False view['internal_d_checkbutton'].set_active(False)
[ "def", "register_view", "(", "self", ",", "view", ")", ":", "super", "(", "StateDataFlowsEditorController", ",", "self", ")", ".", "register_view", "(", "view", ")", "view", "[", "'add_d_button'", "]", ".", "connect", "(", "'clicked'", ",", "self", ".", "d...
Called when the View was registered Can be used e.g. to connect signals. Here, the destroy signal is connected to close the application
[ "Called", "when", "the", "View", "was", "registered" ]
python
train
saltstack/salt
salt/modules/zabbix.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/zabbix.py#L1226-L1273
def host_get(host=None, name=None, hostids=None, **kwargs): ''' .. versionadded:: 2016.3.0 Retrieve hosts according to the given parameters .. note:: This function accepts all optional host.get parameters: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/get :param host: technical name of the host :param name: visible name of the host :param hostids: ids of the hosts :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Array with convenient hosts details, False if no host found or on failure. CLI Example: .. code-block:: bash salt '*' zabbix.host_get 'Zabbix server' ''' conn_args = _login(**kwargs) ret = {} try: if conn_args: method = 'host.get' params = {"output": "extend", "filter": {}} if not name and not hostids and not host: return False if name: params['filter'].setdefault('name', name) if hostids: params.setdefault('hostids', hostids) if host: params['filter'].setdefault('host', host) params = _params_extend(params, **kwargs) ret = _query(method, params, conn_args['url'], conn_args['auth']) return ret['result'] if ret['result'] else False else: raise KeyError except KeyError: return ret
[ "def", "host_get", "(", "host", "=", "None", ",", "name", "=", "None", ",", "hostids", "=", "None", ",", "*", "*", "kwargs", ")", ":", "conn_args", "=", "_login", "(", "*", "*", "kwargs", ")", "ret", "=", "{", "}", "try", ":", "if", "conn_args", ...
.. versionadded:: 2016.3.0 Retrieve hosts according to the given parameters .. note:: This function accepts all optional host.get parameters: keyword argument names differ depending on your zabbix version, see here__. .. __: https://www.zabbix.com/documentation/2.4/manual/api/reference/host/get :param host: technical name of the host :param name: visible name of the host :param hostids: ids of the hosts :param _connection_user: Optional - zabbix user (can also be set in opts or pillar, see module's docstring) :param _connection_password: Optional - zabbix password (can also be set in opts or pillar, see module's docstring) :param _connection_url: Optional - url of zabbix frontend (can also be set in opts, pillar, see module's docstring) :return: Array with convenient hosts details, False if no host found or on failure. CLI Example: .. code-block:: bash salt '*' zabbix.host_get 'Zabbix server'
[ "..", "versionadded", "::", "2016", ".", "3", ".", "0" ]
python
train
numenta/nupic
src/nupic/data/file_record_stream.py
https://github.com/numenta/nupic/blob/5922fafffdccc8812e72b3324965ad2f7d4bbdad/src/nupic/data/file_record_stream.py#L609-L659
def _updateSequenceInfo(self, r): """Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file. """ # Get current sequence id (if any) newSequence = False sequenceId = (r[self._sequenceIdIdx] if self._sequenceIdIdx is not None else None) if sequenceId != self._currSequence: # verify that the new sequence didn't show up before if sequenceId in self._sequences: raise Exception('Broken sequence: %s, record: %s' % \ (sequenceId, r)) # add the finished sequence to the set of sequence self._sequences.add(self._currSequence) self._currSequence = sequenceId # Verify that the reset is consistent (if there is one) if self._resetIdx: assert r[self._resetIdx] == 1 newSequence = True else: # Check the reset reset = False if self._resetIdx: reset = r[self._resetIdx] if reset == 1: newSequence = True # If it's still the same old sequence make sure the time flows forward if not newSequence: if self._timeStampIdx and self._currTime is not None: t = r[self._timeStampIdx] if t < self._currTime: raise Exception('No time travel. Early timestamp for record: %s' % r) if self._timeStampIdx: self._currTime = r[self._timeStampIdx]
[ "def", "_updateSequenceInfo", "(", "self", ",", "r", ")", ":", "# Get current sequence id (if any)", "newSequence", "=", "False", "sequenceId", "=", "(", "r", "[", "self", ".", "_sequenceIdIdx", "]", "if", "self", ".", "_sequenceIdIdx", "is", "not", "None", "e...
Keep track of sequence and make sure time goes forward Check if the current record is the beginning of a new sequence A new sequence starts in 2 cases: 1. The sequence id changed (if there is a sequence id field) 2. The reset field is 1 (if there is a reset field) Note that if there is no sequenceId field or resetId field then the entire dataset is technically one big sequence. The function will not return True for the first record in this case. This is Ok because it is important to detect new sequences only when there are multiple sequences in the file.
[ "Keep", "track", "of", "sequence", "and", "make", "sure", "time", "goes", "forward" ]
python
valid
awslabs/mxboard
python/mxboard/event_file_writer.py
https://github.com/awslabs/mxboard/blob/36057ff0f05325c9dc2fe046521325bf9d563a88/python/mxboard/event_file_writer.py#L102-L107
def close(self): """Flushes the pending events and closes the writer after it is done.""" self.flush() if self._recordio_writer is not None: self._recordio_writer.close() self._recordio_writer = None
[ "def", "close", "(", "self", ")", ":", "self", ".", "flush", "(", ")", "if", "self", ".", "_recordio_writer", "is", "not", "None", ":", "self", ".", "_recordio_writer", ".", "close", "(", ")", "self", ".", "_recordio_writer", "=", "None" ]
Flushes the pending events and closes the writer after it is done.
[ "Flushes", "the", "pending", "events", "and", "closes", "the", "writer", "after", "it", "is", "done", "." ]
python
train
inveniosoftware-attic/invenio-comments
invenio_comments/views.py
https://github.com/inveniosoftware-attic/invenio-comments/blob/62bb6e07c146baf75bf8de80b5896ab2a01a8423/invenio_comments/views.py#L189-L213
def comments(recid): """Display comments.""" from invenio_access.local_config import VIEWRESTRCOLL from invenio_access.mailcookie import \ mail_cookie_create_authorize_action from .api import check_user_can_view_comments auth_code, auth_msg = check_user_can_view_comments(current_user, recid) if auth_code and current_user.is_guest: cookie = mail_cookie_create_authorize_action(VIEWRESTRCOLL, { 'collection': g.collection}) url_args = {'action': cookie, 'ln': g.ln, 'referer': request.referrer} flash(_("Authorization failure"), 'error') return redirect(url_for('webaccount.login', **url_args)) elif auth_code: flash(auth_msg, 'error') abort(401) # FIXME check restricted discussion comments = CmtRECORDCOMMENT.query.filter(db.and_( CmtRECORDCOMMENT.id_bibrec == recid, CmtRECORDCOMMENT.in_reply_to_id_cmtRECORDCOMMENT == 0, CmtRECORDCOMMENT.star_score == 0 )).order_by(CmtRECORDCOMMENT.date_creation).all() return render_template('comments/comments.html', comments=comments, option='comments')
[ "def", "comments", "(", "recid", ")", ":", "from", "invenio_access", ".", "local_config", "import", "VIEWRESTRCOLL", "from", "invenio_access", ".", "mailcookie", "import", "mail_cookie_create_authorize_action", "from", ".", "api", "import", "check_user_can_view_comments",...
Display comments.
[ "Display", "comments", "." ]
python
train
ejeschke/ginga
ginga/doc/download_doc.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/doc/download_doc.py#L15-L52
def _find_rtd_version(): """Find closest RTD doc version.""" vstr = 'latest' try: import ginga from bs4 import BeautifulSoup except ImportError: return vstr # No active doc build before this release, just use latest. if not minversion(ginga, '2.6.0'): return vstr # Get RTD download listing. url = 'https://readthedocs.org/projects/ginga/downloads/' with urllib.request.urlopen(url) as r: soup = BeautifulSoup(r, 'html.parser') # Compile a list of available HTML doc versions for download. all_rtd_vernums = [] for link in soup.find_all('a'): href = link.get('href') if 'htmlzip' not in href: continue s = href.split('/')[-2] if s.startswith('v'): # Ignore latest and stable all_rtd_vernums.append(s) all_rtd_vernums.sort(reverse=True) # Find closest match. ginga_ver = ginga.__version__ for rtd_ver in all_rtd_vernums: if ginga_ver > rtd_ver[1:]: # Ignore "v" in comparison break else: vstr = rtd_ver return vstr
[ "def", "_find_rtd_version", "(", ")", ":", "vstr", "=", "'latest'", "try", ":", "import", "ginga", "from", "bs4", "import", "BeautifulSoup", "except", "ImportError", ":", "return", "vstr", "# No active doc build before this release, just use latest.", "if", "not", "mi...
Find closest RTD doc version.
[ "Find", "closest", "RTD", "doc", "version", "." ]
python
train
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_1/git/git_client_base.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_1/git/git_client_base.py#L886-L930
def get_items(self, repository_id, project=None, scope_path=None, recursion_level=None, include_content_metadata=None, latest_processed_change=None, download=None, include_links=None, version_descriptor=None): """GetItems. [Preview API] Get Item Metadata and/or Content for a collection of items. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content which is always returned as a download. :param str repository_id: The name or ID of the repository. :param str project: Project ID or project name :param str scope_path: The path scope. The default is null. :param str recursion_level: The recursion level of this request. The default is 'none', no recursion. :param bool include_content_metadata: Set to true to include content metadata. Default is false. :param bool latest_processed_change: Set to true to include the lastest changes. Default is false. :param bool download: Set to true to download the response as a file. Default is false. :param bool include_links: Set to true to include links to items. Default is false. :param :class:`<GitVersionDescriptor> <azure.devops.v5_1.git.models.GitVersionDescriptor>` version_descriptor: Version descriptor. Default is the default branch for the repository. 
:rtype: [GitItem] """ route_values = {} if project is not None: route_values['project'] = self._serialize.url('project', project, 'str') if repository_id is not None: route_values['repositoryId'] = self._serialize.url('repository_id', repository_id, 'str') query_parameters = {} if scope_path is not None: query_parameters['scopePath'] = self._serialize.query('scope_path', scope_path, 'str') if recursion_level is not None: query_parameters['recursionLevel'] = self._serialize.query('recursion_level', recursion_level, 'str') if include_content_metadata is not None: query_parameters['includeContentMetadata'] = self._serialize.query('include_content_metadata', include_content_metadata, 'bool') if latest_processed_change is not None: query_parameters['latestProcessedChange'] = self._serialize.query('latest_processed_change', latest_processed_change, 'bool') if download is not None: query_parameters['download'] = self._serialize.query('download', download, 'bool') if include_links is not None: query_parameters['includeLinks'] = self._serialize.query('include_links', include_links, 'bool') if version_descriptor is not None: if version_descriptor.version_type is not None: query_parameters['versionDescriptor.versionType'] = version_descriptor.version_type if version_descriptor.version is not None: query_parameters['versionDescriptor.version'] = version_descriptor.version if version_descriptor.version_options is not None: query_parameters['versionDescriptor.versionOptions'] = version_descriptor.version_options response = self._send(http_method='GET', location_id='fb93c0db-47ed-4a31-8c20-47552878fb44', version='5.1-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[GitItem]', self._unwrap_collection(response))
[ "def", "get_items", "(", "self", ",", "repository_id", ",", "project", "=", "None", ",", "scope_path", "=", "None", ",", "recursion_level", "=", "None", ",", "include_content_metadata", "=", "None", ",", "latest_processed_change", "=", "None", ",", "download", ...
GetItems. [Preview API] Get Item Metadata and/or Content for a collection of items. The download parameter is to indicate whether the content should be available as a download or just sent as a stream in the response. Doesn't apply to zipped content which is always returned as a download. :param str repository_id: The name or ID of the repository. :param str project: Project ID or project name :param str scope_path: The path scope. The default is null. :param str recursion_level: The recursion level of this request. The default is 'none', no recursion. :param bool include_content_metadata: Set to true to include content metadata. Default is false. :param bool latest_processed_change: Set to true to include the lastest changes. Default is false. :param bool download: Set to true to download the response as a file. Default is false. :param bool include_links: Set to true to include links to items. Default is false. :param :class:`<GitVersionDescriptor> <azure.devops.v5_1.git.models.GitVersionDescriptor>` version_descriptor: Version descriptor. Default is the default branch for the repository. :rtype: [GitItem]
[ "GetItems", ".", "[", "Preview", "API", "]", "Get", "Item", "Metadata", "and", "/", "or", "Content", "for", "a", "collection", "of", "items", ".", "The", "download", "parameter", "is", "to", "indicate", "whether", "the", "content", "should", "be", "availab...
python
train
adobe-apiplatform/umapi-client.py
umapi_client/functional.py
https://github.com/adobe-apiplatform/umapi-client.py/blob/1c446d79643cc8615adaa23e12dce3ac5782cf76/umapi_client/functional.py#L182-L205
def add_to_groups(self, groups=None, all_groups=False, group_type=None): """ Add user to some (typically PLC) groups. Note that, if you add to no groups, the effect is simply to do an "add to organization Everybody group", so we let that be done. :param groups: list of group names the user should be added to :param all_groups: a boolean meaning add to all (don't specify groups or group_type in this case) :param group_type: the type of group (defaults to "product") :return: the User, so you can do User(...).add_to_groups(...).add_role(...) """ if all_groups: if groups or group_type: raise ArgumentError("When adding to all groups, do not specify specific groups or types") glist = "all" else: if not groups: groups = [] if not group_type: group_type = GroupTypes.product elif group_type in GroupTypes.__members__: group_type = GroupTypes[group_type] if group_type not in GroupTypes: raise ArgumentError("You must specify a GroupType value for argument group_type") glist = {group_type.name: [group for group in groups]} return self.append(add=glist)
[ "def", "add_to_groups", "(", "self", ",", "groups", "=", "None", ",", "all_groups", "=", "False", ",", "group_type", "=", "None", ")", ":", "if", "all_groups", ":", "if", "groups", "or", "group_type", ":", "raise", "ArgumentError", "(", "\"When adding to all...
Add user to some (typically PLC) groups. Note that, if you add to no groups, the effect is simply to do an "add to organization Everybody group", so we let that be done. :param groups: list of group names the user should be added to :param all_groups: a boolean meaning add to all (don't specify groups or group_type in this case) :param group_type: the type of group (defaults to "product") :return: the User, so you can do User(...).add_to_groups(...).add_role(...)
[ "Add", "user", "to", "some", "(", "typically", "PLC", ")", "groups", ".", "Note", "that", "if", "you", "add", "to", "no", "groups", "the", "effect", "is", "simply", "to", "do", "an", "add", "to", "organization", "Everybody", "group", "so", "we", "let",...
python
train
hardbyte/python-can
can/interfaces/usb2can/usb2canInterface.py
https://github.com/hardbyte/python-can/blob/cdc5254d96072df7739263623f3e920628a7d214/can/interfaces/usb2can/usb2canInterface.py#L162-L174
def _detect_available_configs(serial_matcher=None): """ Uses the Windows Management Instrumentation to identify serial devices. :param str serial_matcher (optional): search string for automatic detection of the device serial """ if serial_matcher: channels = find_serial_devices(serial_matcher) else: channels = find_serial_devices() return [{'interface': 'usb2can', 'channel': c} for c in channels]
[ "def", "_detect_available_configs", "(", "serial_matcher", "=", "None", ")", ":", "if", "serial_matcher", ":", "channels", "=", "find_serial_devices", "(", "serial_matcher", ")", "else", ":", "channels", "=", "find_serial_devices", "(", ")", "return", "[", "{", ...
Uses the Windows Management Instrumentation to identify serial devices. :param str serial_matcher (optional): search string for automatic detection of the device serial
[ "Uses", "the", "Windows", "Management", "Instrumentation", "to", "identify", "serial", "devices", "." ]
python
train
tadashi-aikawa/owlmixin
owlmixin/__init__.py
https://github.com/tadashi-aikawa/owlmixin/blob/7c4a042c3008abddc56a8e8e55ae930d276071f5/owlmixin/__init__.py#L605-L619
def from_csvf(cls, fpath: str, fieldnames: Optional[Sequence[str]]=None, encoding: str='utf8', force_snake_case: bool=True, restrict: bool=True) -> TList[T]: """From csv file path to list of instance :param fpath: Csv file path :param fieldnames: Specify csv header names if not included in the file :param encoding: Csv file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param restrict: Prohibit extra parameters if True :return: List of Instance """ return cls.from_dicts(util.load_csvf(fpath, fieldnames, encoding), force_snake_case=force_snake_case, force_cast=True, restrict=restrict)
[ "def", "from_csvf", "(", "cls", ",", "fpath", ":", "str", ",", "fieldnames", ":", "Optional", "[", "Sequence", "[", "str", "]", "]", "=", "None", ",", "encoding", ":", "str", "=", "'utf8'", ",", "force_snake_case", ":", "bool", "=", "True", ",", "res...
From csv file path to list of instance :param fpath: Csv file path :param fieldnames: Specify csv header names if not included in the file :param encoding: Csv file encoding :param force_snake_case: Keys are transformed to snake case in order to compliant PEP8 if True :param restrict: Prohibit extra parameters if True :return: List of Instance
[ "From", "csv", "file", "path", "to", "list", "of", "instance" ]
python
train
inveniosoftware/invenio-files-rest
invenio_files_rest/serializer.py
https://github.com/inveniosoftware/invenio-files-rest/blob/59a950da61cc8d5882a03c6fde6db2e2ed10befd/invenio_files_rest/serializer.py#L69-L97
def dump_links(self, o): """Dump links.""" params = {'versionId': o.version_id} data = { 'self': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **(params if not o.is_head or o.deleted else {}) ), 'version': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True, **params ) } if o.is_head and not o.deleted: data.update({'uploads': url_for( '.object_api', bucket_id=o.bucket_id, key=o.key, _external=True ) + '?uploads', }) return data
[ "def", "dump_links", "(", "self", ",", "o", ")", ":", "params", "=", "{", "'versionId'", ":", "o", ".", "version_id", "}", "data", "=", "{", "'self'", ":", "url_for", "(", "'.object_api'", ",", "bucket_id", "=", "o", ".", "bucket_id", ",", "key", "="...
Dump links.
[ "Dump", "links", "." ]
python
train
lacava/few
few/variation.py
https://github.com/lacava/few/blob/5c72044425e9a5d73b8dc2cbb9b96e873dcb5b4a/few/variation.py#L206-L223
def is_valid_program(self,p): """checks whether program p makes a syntactically valid tree. checks that the accumulated program length is always greater than the accumulated arities, indicating that the appropriate number of arguments is alway present for functions. It then checks that the sum of arties +1 exactly equals the length of the stack, indicating that there are no missing arguments. """ # print("p:",p) arities = list(a.arity[a.in_type] for a in p) accu_arities = list(accumulate(arities)) accu_len = list(np.arange(len(p))+1) check = list(a < b for a,b in zip(accu_arities,accu_len)) # print("accu_arities:",accu_arities) # print("accu_len:",accu_len) # print("accu_arities < accu_len:",accu_arities<accu_len) return all(check) and sum(a.arity[a.in_type] for a in p) +1 == len(p) and len(p)>0
[ "def", "is_valid_program", "(", "self", ",", "p", ")", ":", "# print(\"p:\",p)", "arities", "=", "list", "(", "a", ".", "arity", "[", "a", ".", "in_type", "]", "for", "a", "in", "p", ")", "accu_arities", "=", "list", "(", "accumulate", "(", "arities", ...
checks whether program p makes a syntactically valid tree. checks that the accumulated program length is always greater than the accumulated arities, indicating that the appropriate number of arguments is alway present for functions. It then checks that the sum of arties +1 exactly equals the length of the stack, indicating that there are no missing arguments.
[ "checks", "whether", "program", "p", "makes", "a", "syntactically", "valid", "tree", "." ]
python
train
marcotcr/lime
lime/lime_text.py
https://github.com/marcotcr/lime/blob/08133d47df00ed918e22005e0c98f6eefd5a1d71/lime/lime_text.py#L420-L469
def __data_labels_distances(self, indexed_string, classifier_fn, num_samples, distance_metric='cosine'): """Generates a neighborhood around a prediction. Generates neighborhood data by randomly removing words from the instance, and predicting with the classifier. Uses cosine distance to compute distances between original and perturbed instances. Args: indexed_string: document (IndexedString) to be explained, classifier_fn: classifier prediction probability function, which takes a string and outputs prediction probabilities. For ScikitClassifier, this is classifier.predict_proba. num_samples: size of the neighborhood to learn the linear model distance_metric: the distance metric to use for sample weighting, defaults to cosine similarity. Returns: A tuple (data, labels, distances), where: data: dense num_samples * K binary matrix, where K is the number of tokens in indexed_string. The first row is the original instance, and thus a row of ones. labels: num_samples * L matrix, where L is the number of target labels distances: cosine distance between the original instance and each perturbed instance (computed in the binary 'data' matrix), times 100. """ def distance_fn(x): return sklearn.metrics.pairwise.pairwise_distances( x, x[0], metric=distance_metric).ravel() * 100 doc_size = indexed_string.num_words() sample = self.random_state.randint(1, doc_size + 1, num_samples - 1) data = np.ones((num_samples, doc_size)) data[0] = np.ones(doc_size) features_range = range(doc_size) inverse_data = [indexed_string.raw_string()] for i, size in enumerate(sample, start=1): inactive = self.random_state.choice(features_range, size, replace=False) data[i, inactive] = 0 inverse_data.append(indexed_string.inverse_removing(inactive)) labels = classifier_fn(inverse_data) distances = distance_fn(sp.sparse.csr_matrix(data)) return data, labels, distances
[ "def", "__data_labels_distances", "(", "self", ",", "indexed_string", ",", "classifier_fn", ",", "num_samples", ",", "distance_metric", "=", "'cosine'", ")", ":", "def", "distance_fn", "(", "x", ")", ":", "return", "sklearn", ".", "metrics", ".", "pairwise", "...
Generates a neighborhood around a prediction. Generates neighborhood data by randomly removing words from the instance, and predicting with the classifier. Uses cosine distance to compute distances between original and perturbed instances. Args: indexed_string: document (IndexedString) to be explained, classifier_fn: classifier prediction probability function, which takes a string and outputs prediction probabilities. For ScikitClassifier, this is classifier.predict_proba. num_samples: size of the neighborhood to learn the linear model distance_metric: the distance metric to use for sample weighting, defaults to cosine similarity. Returns: A tuple (data, labels, distances), where: data: dense num_samples * K binary matrix, where K is the number of tokens in indexed_string. The first row is the original instance, and thus a row of ones. labels: num_samples * L matrix, where L is the number of target labels distances: cosine distance between the original instance and each perturbed instance (computed in the binary 'data' matrix), times 100.
[ "Generates", "a", "neighborhood", "around", "a", "prediction", "." ]
python
train
kubernetes-client/python
kubernetes/client/apis/coordination_v1_api.py
https://github.com/kubernetes-client/python/blob/5e512ff564c244c50cab780d821542ed56aa965a/kubernetes/client/apis/coordination_v1_api.py#L153-L180
def delete_collection_namespaced_lease(self, namespace, **kwargs): """ delete collection of Lease This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_lease(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('async_req'): return self.delete_collection_namespaced_lease_with_http_info(namespace, **kwargs) else: (data) = self.delete_collection_namespaced_lease_with_http_info(namespace, **kwargs) return data
[ "def", "delete_collection_namespaced_lease", "(", "self", ",", "namespace", ",", "*", "*", "kwargs", ")", ":", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'async_req'", ")", ":", "return", "self", ".", "dele...
delete collection of Lease This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.delete_collection_namespaced_lease(namespace, async_req=True) >>> result = thread.get() :param async_req bool :param str namespace: object name and auth scope, such as for teams and projects (required) :param str pretty: If 'true', then the output is pretty printed. :param str _continue: The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\". This field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications. :param str field_selector: A selector to restrict the list of returned objects by their fields. Defaults to everything. :param str label_selector: A selector to restrict the list of returned objects by their labels. Defaults to everything. 
:param int limit: limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true. The server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned. :param str resource_version: When specified with a watch call, shows changes that occur after that particular version of a resource. Defaults to changes from the beginning of history. When specified for list: - if unset, then the result is returned from remote storage based on quorum-read flag; - if it's 0, then we simply return what we currently have in cache, no guarantee; - if set to non zero, then the result is at least as fresh as given rv. :param int timeout_seconds: Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity. 
:param bool watch: Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion. :return: V1Status If the method is called asynchronously, returns the request thread.
[ "delete", "collection", "of", "Lease", "This", "method", "makes", "a", "synchronous", "HTTP", "request", "by", "default", ".", "To", "make", "an", "asynchronous", "HTTP", "request", "please", "pass", "async_req", "=", "True", ">>>", "thread", "=", "api", "."...
python
train
Valassis-Digital-Media/spylon
spylon/spark/launcher.py
https://github.com/Valassis-Digital-Media/spylon/blob/ac00e285fa1c790674606b793819c3e5baee0d48/spylon/spark/launcher.py#L552-L580
def with_spark_context(application_name, conf=None): """Context manager for a spark context Parameters ---------- application_name : string conf : string, optional Returns ------- sc : SparkContext Examples -------- Used within a context manager >>> with with_spark_context("MyApplication") as sc: ... # Your Code here ... pass """ if conf is None: conf = default_configuration assert isinstance(conf, SparkConfiguration) sc = conf.spark_context(application_name) try: yield sc finally: sc.stop()
[ "def", "with_spark_context", "(", "application_name", ",", "conf", "=", "None", ")", ":", "if", "conf", "is", "None", ":", "conf", "=", "default_configuration", "assert", "isinstance", "(", "conf", ",", "SparkConfiguration", ")", "sc", "=", "conf", ".", "spa...
Context manager for a spark context Parameters ---------- application_name : string conf : string, optional Returns ------- sc : SparkContext Examples -------- Used within a context manager >>> with with_spark_context("MyApplication") as sc: ... # Your Code here ... pass
[ "Context", "manager", "for", "a", "spark", "context" ]
python
train
RudolfCardinal/pythonlib
cardinal_pythonlib/rnc_db.py
https://github.com/RudolfCardinal/pythonlib/blob/0b84cb35f38bd7d8723958dae51b480a829b7227/cardinal_pythonlib/rnc_db.py#L2126-L2159
def insert_record_by_dict(self, table: str, valuedict: Dict[str, Any]) -> Optional[int]: """Inserts a record into database, table "table", using a dictionary containing field/value mappings. Returns the new PK (or None).""" if not valuedict: return None n = len(valuedict) fields = [] args = [] for f, v in valuedict.items(): fields.append(self.delimit(f)) args.append(v) query = """ INSERT INTO {table} ({fields}) VALUES ({placeholders}) """.format( table=table, fields=",".join(fields), placeholders=",".join(["?"]*n) ) query = self.localize_sql(query) log.debug("About to insert_record_by_dict with SQL template: " + query) try: cursor = self.db.cursor() debug_sql(query, args) cursor.execute(query, args) new_pk = get_pk_of_last_insert(cursor) log.debug("Record inserted.") return new_pk except: # nopep8 log.exception("insert_record_by_dict: Failed to insert record.") raise
[ "def", "insert_record_by_dict", "(", "self", ",", "table", ":", "str", ",", "valuedict", ":", "Dict", "[", "str", ",", "Any", "]", ")", "->", "Optional", "[", "int", "]", ":", "if", "not", "valuedict", ":", "return", "None", "n", "=", "len", "(", "...
Inserts a record into database, table "table", using a dictionary containing field/value mappings. Returns the new PK (or None).
[ "Inserts", "a", "record", "into", "database", "table", "table", "using", "a", "dictionary", "containing", "field", "/", "value", "mappings", ".", "Returns", "the", "new", "PK", "(", "or", "None", ")", "." ]
python
train
andymccurdy/redis-py
redis/sentinel.py
https://github.com/andymccurdy/redis-py/blob/cdfe2befbe00db4a3c48c9ddd6d64dea15f6f0db/redis/sentinel.py#L109-L125
def rotate_slaves(self): "Round-robin slave balancer" slaves = self.sentinel_manager.discover_slaves(self.service_name) if slaves: if self.slave_rr_counter is None: self.slave_rr_counter = random.randint(0, len(slaves) - 1) for _ in xrange(len(slaves)): self.slave_rr_counter = ( self.slave_rr_counter + 1) % len(slaves) slave = slaves[self.slave_rr_counter] yield slave # Fallback to the master connection try: yield self.get_master_address() except MasterNotFoundError: pass raise SlaveNotFoundError('No slave found for %r' % (self.service_name))
[ "def", "rotate_slaves", "(", "self", ")", ":", "slaves", "=", "self", ".", "sentinel_manager", ".", "discover_slaves", "(", "self", ".", "service_name", ")", "if", "slaves", ":", "if", "self", ".", "slave_rr_counter", "is", "None", ":", "self", ".", "slave...
Round-robin slave balancer
[ "Round", "-", "robin", "slave", "balancer" ]
python
train
cpenv/cpenv
cpenv/cli.py
https://github.com/cpenv/cpenv/blob/afbb569ae04002743db041d3629a5be8c290bd89/cpenv/cli.py#L144-L205
def activate(paths, skip_local, skip_shared): '''Activate an environment''' if not paths: ctx = click.get_current_context() if cpenv.get_active_env(): ctx.invoke(info) return click.echo(ctx.get_help()) examples = ( '\nExamples: \n' ' cpenv activate my_env\n' ' cpenv activate ./relative/path/to/my_env\n' ' cpenv activate my_env my_module\n' ) click.echo(examples) return if skip_local: cpenv.module_resolvers.remove(cpenv.resolver.module_resolver) cpenv.module_resolvers.remove(cpenv.resolver.active_env_module_resolver) if skip_shared: cpenv.module_resolvers.remove(cpenv.resolver.modules_path_resolver) try: r = cpenv.resolve(*paths) except cpenv.ResolveError as e: click.echo('\n' + str(e)) return resolved = set(r.resolved) active_modules = set() env = cpenv.get_active_env() if env: active_modules.add(env) active_modules.update(cpenv.get_active_modules()) new_modules = resolved - active_modules old_modules = active_modules & resolved if old_modules and not new_modules: click.echo( '\nModules already active: ' + bold(' '.join([obj.name for obj in old_modules])) ) return if env and contains_env(new_modules): click.echo('\nUse bold(exit) to leave your active environment first.') return click.echo('\nResolved the following modules...') click.echo(format_objects(r.resolved)) r.activate() click.echo(blue('\nLaunching subshell...')) modules = sorted(resolved | active_modules, key=_type_and_name) prompt = ':'.join([obj.name for obj in modules]) shell.launch(prompt)
[ "def", "activate", "(", "paths", ",", "skip_local", ",", "skip_shared", ")", ":", "if", "not", "paths", ":", "ctx", "=", "click", ".", "get_current_context", "(", ")", "if", "cpenv", ".", "get_active_env", "(", ")", ":", "ctx", ".", "invoke", "(", "inf...
Activate an environment
[ "Activate", "an", "environment" ]
python
valid
fermiPy/fermipy
fermipy/jobs/job_archive.py
https://github.com/fermiPy/fermipy/blob/9df5e7e3728307fd58c5bba36fd86783c39fbad4/fermipy/jobs/job_archive.py#L690-L705
def main_browse(): """Entry point for command line use for browsing a JobArchive """ parser = argparse.ArgumentParser(usage="job_archive.py [options]", description="Browse a job archive") parser.add_argument('--jobs', action='store', dest='job_archive_table', type=str, default='job_archive_temp2.fits', help="Job archive file") parser.add_argument('--files', action='store', dest='file_archive_table', type=str, default='file_archive_temp2.fits', help="File archive file") parser.add_argument('--base', action='store', dest='base_path', type=str, default=os.path.abspath('.'), help="File archive base path") args = parser.parse_args(sys.argv[1:]) job_ar = JobArchive.build_archive(**args.__dict__) job_ar.table.pprint()
[ "def", "main_browse", "(", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", "usage", "=", "\"job_archive.py [options]\"", ",", "description", "=", "\"Browse a job archive\"", ")", "parser", ".", "add_argument", "(", "'--jobs'", ",", "action", "=", ...
Entry point for command line use for browsing a JobArchive
[ "Entry", "point", "for", "command", "line", "use", "for", "browsing", "a", "JobArchive" ]
python
train
bitesofcode/projexui
projexui/widgets/xwalkthroughwidget/xwalkthroughgraphics.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xwalkthroughwidget/xwalkthroughgraphics.py#L179-L196
def load(self, graphic): """ Loads information for this item from the xml data. :param graphic | <XWalkthroughItem> """ for prop in graphic.properties(): key = prop.name() value = prop.value() if key == 'caption': value = projex.wikitext.render(value.strip()) self.setProperty(key, value) for attr, attr_value in prop.attributes().items(): self.setProperty('{0}_{1}'.format(key, attr), attr_value) self.prepare()
[ "def", "load", "(", "self", ",", "graphic", ")", ":", "for", "prop", "in", "graphic", ".", "properties", "(", ")", ":", "key", "=", "prop", ".", "name", "(", ")", "value", "=", "prop", ".", "value", "(", ")", "if", "key", "==", "'caption'", ":", ...
Loads information for this item from the xml data. :param graphic | <XWalkthroughItem>
[ "Loads", "information", "for", "this", "item", "from", "the", "xml", "data", ".", ":", "param", "graphic", "|", "<XWalkthroughItem", ">" ]
python
train
lablup/backend.ai-client-py
src/ai/backend/client/cli/files.py
https://github.com/lablup/backend.ai-client-py/blob/a063d774fea6f4350b89498c40d3c837ec3029a7/src/ai/backend/client/cli/files.py#L43-L62
def download(sess_id_or_alias, files, dest): """ Download files from a running container. \b SESSID: Session ID or its alias given when creating the session. FILES: Paths inside container. """ if len(files) < 1: return with Session() as session: try: print_wait('Downloading file(s) from {}...' .format(sess_id_or_alias)) kernel = session.Kernel(sess_id_or_alias) kernel.download(files, dest, show_progress=True) print_done('Downloaded to {}.'.format(dest.resolve())) except Exception as e: print_error(e) sys.exit(1)
[ "def", "download", "(", "sess_id_or_alias", ",", "files", ",", "dest", ")", ":", "if", "len", "(", "files", ")", "<", "1", ":", "return", "with", "Session", "(", ")", "as", "session", ":", "try", ":", "print_wait", "(", "'Downloading file(s) from {}...'", ...
Download files from a running container. \b SESSID: Session ID or its alias given when creating the session. FILES: Paths inside container.
[ "Download", "files", "from", "a", "running", "container", "." ]
python
train
ministryofjustice/money-to-prisoners-common
mtp_common/build_tasks/tasks.py
https://github.com/ministryofjustice/money-to-prisoners-common/blob/33c43a2912cb990d9148da7c8718f480f07d90a1/mtp_common/build_tasks/tasks.py#L317-L323
def additional_assets(context: Context): """ Collects assets from GOV.UK frontend toolkit """ rsync_flags = '-avz' if context.verbosity == 2 else '-az' for path in context.app.additional_asset_paths: context.shell('rsync %s %s %s/' % (rsync_flags, path, context.app.asset_build_path))
[ "def", "additional_assets", "(", "context", ":", "Context", ")", ":", "rsync_flags", "=", "'-avz'", "if", "context", ".", "verbosity", "==", "2", "else", "'-az'", "for", "path", "in", "context", ".", "app", ".", "additional_asset_paths", ":", "context", ".",...
Collects assets from GOV.UK frontend toolkit
[ "Collects", "assets", "from", "GOV", ".", "UK", "frontend", "toolkit" ]
python
train
jtwhite79/pyemu
pyemu/utils/helpers.py
https://github.com/jtwhite79/pyemu/blob/c504d8e7a4097cec07655a6318d275739bd8148a/pyemu/utils/helpers.py#L2901-L2920
def write_forward_run(self): """ write the forward run script forward_run.py """ with open(os.path.join(self.m.model_ws,self.forward_run_file),'w') as f: f.write("import os\nimport numpy as np\nimport pandas as pd\nimport flopy\n") f.write("import pyemu\n") for ex_imp in self.extra_forward_imports: f.write('import {0}\n'.format(ex_imp)) for tmp_file in self.tmp_files: f.write("try:\n") f.write(" os.remove('{0}')\n".format(tmp_file)) f.write("except Exception as e:\n") f.write(" print('error removing tmp file:{0}')\n".format(tmp_file)) for line in self.frun_pre_lines: f.write(line+'\n') for line in self.frun_model_lines: f.write(line+'\n') for line in self.frun_post_lines: f.write(line+'\n')
[ "def", "write_forward_run", "(", "self", ")", ":", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "m", ".", "model_ws", ",", "self", ".", "forward_run_file", ")", ",", "'w'", ")", "as", "f", ":", "f", ".", "write", "(", "\...
write the forward run script forward_run.py
[ "write", "the", "forward", "run", "script", "forward_run", ".", "py" ]
python
train
RRZE-HPC/kerncraft
kerncraft/kernel.py
https://github.com/RRZE-HPC/kerncraft/blob/c60baf8043e4da8d8d66da7575021c2f4c6c78af/kerncraft/kernel.py#L84-L107
def transform_multidim_to_1d_ref(aref, dimension_dict): """ Transform ast of multidimensional reference to a single dimension reference. In-place operation! """ dims = [] name = aref while type(name) is c_ast.ArrayRef: dims.append(name.subscript) name = name.name subscript_list = [] for i, d in enumerate(dims): if i == 0: subscript_list.append(d) else: subscript_list.append(c_ast.BinaryOp('*', d, reduce( lambda l, r: c_ast.BinaryOp('*', l, r), dimension_dict[name.name][-1:-i-1:-1]))) aref.subscript = reduce( lambda l, r: c_ast.BinaryOp('+', l, r), subscript_list) aref.name = name
[ "def", "transform_multidim_to_1d_ref", "(", "aref", ",", "dimension_dict", ")", ":", "dims", "=", "[", "]", "name", "=", "aref", "while", "type", "(", "name", ")", "is", "c_ast", ".", "ArrayRef", ":", "dims", ".", "append", "(", "name", ".", "subscript",...
Transform ast of multidimensional reference to a single dimension reference. In-place operation!
[ "Transform", "ast", "of", "multidimensional", "reference", "to", "a", "single", "dimension", "reference", "." ]
python
test
nats-io/asyncio-nats
nats/aio/client.py
https://github.com/nats-io/asyncio-nats/blob/39e840be0b12ce326edac0bba69aeb1be930dcb8/nats/aio/client.py#L735-L763
def timed_request(self, subject, payload, timeout=0.5): """ Implements the request/response pattern via pub/sub using an ephemeral subscription which will be published with a limited interest of 1 reply returning the response or raising a Timeout error. ->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90 ->> UNSUB 90 1 ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5 ->> MSG_PAYLOAD: world <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5 """ next_inbox = INBOX_PREFIX[:] next_inbox.extend(self._nuid.next()) inbox = next_inbox.decode() future = asyncio.Future(loop=self._loop) sid = yield from self.subscribe(inbox, future=future, max_msgs=1) yield from self.auto_unsubscribe(sid, 1) yield from self.publish_request(subject, inbox, payload) try: msg = yield from asyncio.wait_for(future, timeout, loop=self._loop) return msg except asyncio.TimeoutError: future.cancel() raise ErrTimeout
[ "def", "timed_request", "(", "self", ",", "subject", ",", "payload", ",", "timeout", "=", "0.5", ")", ":", "next_inbox", "=", "INBOX_PREFIX", "[", ":", "]", "next_inbox", ".", "extend", "(", "self", ".", "_nuid", ".", "next", "(", ")", ")", "inbox", ...
Implements the request/response pattern via pub/sub using an ephemeral subscription which will be published with a limited interest of 1 reply returning the response or raising a Timeout error. ->> SUB _INBOX.2007314fe0fcb2cdc2a2914c1 90 ->> UNSUB 90 1 ->> PUB hello _INBOX.2007314fe0fcb2cdc2a2914c1 5 ->> MSG_PAYLOAD: world <<- MSG hello 2 _INBOX.2007314fe0fcb2cdc2a2914c1 5
[ "Implements", "the", "request", "/", "response", "pattern", "via", "pub", "/", "sub", "using", "an", "ephemeral", "subscription", "which", "will", "be", "published", "with", "a", "limited", "interest", "of", "1", "reply", "returning", "the", "response", "or", ...
python
test
Kortemme-Lab/klab
klab/bio/pdb.py
https://github.com/Kortemme-Lab/klab/blob/6d410ad08f1bd9f7cbbb28d7d946e94fbaaa2b6b/klab/bio/pdb.py#L2340-L2347
def getAtomLinesForResidueInRosettaStructure(self, resid): '''We assume a Rosetta-generated structure where residues are uniquely identified by number.''' lines = [line for line in self.lines if line[0:4] == "ATOM" and resid == int(line[22:27])] if not lines: #print('Failed searching for residue %d.' % resid) #print("".join([line for line in self.lines if line[0:4] == "ATOM"])) raise Exception("Could not find the ATOM/HETATM line corresponding to residue '%(resid)s'." % vars()) return lines
[ "def", "getAtomLinesForResidueInRosettaStructure", "(", "self", ",", "resid", ")", ":", "lines", "=", "[", "line", "for", "line", "in", "self", ".", "lines", "if", "line", "[", "0", ":", "4", "]", "==", "\"ATOM\"", "and", "resid", "==", "int", "(", "li...
We assume a Rosetta-generated structure where residues are uniquely identified by number.
[ "We", "assume", "a", "Rosetta", "-", "generated", "structure", "where", "residues", "are", "uniquely", "identified", "by", "number", "." ]
python
train
duniter/duniter-python-api
duniterpy/api/endpoint.py
https://github.com/duniter/duniter-python-api/blob/3a1e5d61a2f72f5afaf29d010c6cf4dff3648165/duniterpy/api/endpoint.py#L506-L513
def inline(self) -> str: """ Return endpoint string :return: """ inlined = [str(info) for info in (self.server, self.port) if info] return ESSubscribtionEndpoint.API + " " + " ".join(inlined)
[ "def", "inline", "(", "self", ")", "->", "str", ":", "inlined", "=", "[", "str", "(", "info", ")", "for", "info", "in", "(", "self", ".", "server", ",", "self", ".", "port", ")", "if", "info", "]", "return", "ESSubscribtionEndpoint", ".", "API", "+...
Return endpoint string :return:
[ "Return", "endpoint", "string" ]
python
train
brean/python-pathfinding
pathfinding/finder/bi_a_star.py
https://github.com/brean/python-pathfinding/blob/b857bf85e514a1712b40e29ccb5a473cd7fd5c80/pathfinding/finder/bi_a_star.py#L38-L77
def find_path(self, start, end, grid): """ find a path from start to end node on grid using the A* algorithm :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return: """ self.start_time = time.time() # execution time limitation self.runs = 0 # count number of iterations start_open_list = [start] start.g = 0 start.f = 0 start.opened = BY_START end_open_list = [end] end.g = 0 end.f = 0 end.opened = BY_END while len(start_open_list) > 0 and len(end_open_list) > 0: self.runs += 1 self.keep_running() path = self.check_neighbors(start, end, grid, start_open_list, open_value=BY_START, backtrace_by=BY_END) if path: return path, self.runs self.runs += 1 self.keep_running() path = self.check_neighbors(end, start, grid, end_open_list, open_value=BY_END, backtrace_by=BY_START) if path: return path, self.runs # failed to find path return [], self.runs
[ "def", "find_path", "(", "self", ",", "start", ",", "end", ",", "grid", ")", ":", "self", ".", "start_time", "=", "time", ".", "time", "(", ")", "# execution time limitation", "self", ".", "runs", "=", "0", "# count number of iterations", "start_open_list", ...
find a path from start to end node on grid using the A* algorithm :param start: start node :param end: end node :param grid: grid that stores all possible steps/tiles as 2D-list :return:
[ "find", "a", "path", "from", "start", "to", "end", "node", "on", "grid", "using", "the", "A", "*", "algorithm", ":", "param", "start", ":", "start", "node", ":", "param", "end", ":", "end", "node", ":", "param", "grid", ":", "grid", "that", "stores",...
python
train
iotile/coretools
iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/link.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/config/scons-local-3.0.1/SCons/Tool/link.py#L180-L221
def _versioned_lib_symlinks(env, libnode, version, prefix, suffix, name_func, soname_func): """Generate link names that should be created for a versioned shared lirbrary. Returns a dictionary in the form { linkname : linktarget } """ Verbose = False if Verbose: print("_versioned_lib_symlinks: libnode={:r}".format(libnode.get_path())) print("_versioned_lib_symlinks: version={:r}".format(version)) if sys.platform.startswith('openbsd'): # OpenBSD uses x.y shared library versioning numbering convention # and doesn't use symlinks to backwards-compatible libraries if Verbose: print("_versioned_lib_symlinks: return symlinks={:r}".format(None)) return None linkdir = libnode.get_dir() if Verbose: print("_versioned_lib_symlinks: linkdir={:r}".format(linkdir.get_path())) name = name_func(env, libnode, version, prefix, suffix) if Verbose: print("_versioned_lib_symlinks: name={:r}".format(name)) soname = soname_func(env, libnode, version, prefix, suffix) link0 = env.fs.File(soname, linkdir) link1 = env.fs.File(name, linkdir) # We create direct symlinks, not daisy-chained. if link0 == libnode: # This enables SHLIBVERSION without periods (e.g. SHLIBVERSION=1) symlinks = [ (link1, libnode) ] else: # This handles usual SHLIBVERSION, i.e. '1.2', '1.2.3', etc. symlinks = [ (link0, libnode), (link1, libnode) ] if Verbose: print("_versioned_lib_symlinks: return symlinks={:r}".format(SCons.Tool.StringizeLibSymlinks(symlinks))) return symlinks
[ "def", "_versioned_lib_symlinks", "(", "env", ",", "libnode", ",", "version", ",", "prefix", ",", "suffix", ",", "name_func", ",", "soname_func", ")", ":", "Verbose", "=", "False", "if", "Verbose", ":", "print", "(", "\"_versioned_lib_symlinks: libnode={:r}\"", ...
Generate link names that should be created for a versioned shared lirbrary. Returns a dictionary in the form { linkname : linktarget }
[ "Generate", "link", "names", "that", "should", "be", "created", "for", "a", "versioned", "shared", "lirbrary", ".", "Returns", "a", "dictionary", "in", "the", "form", "{", "linkname", ":", "linktarget", "}" ]
python
train
ransford/sllurp
sllurp/llrp.py
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1201-L1220
def setTxPower(self, tx_power): """Set the transmission power for one or more antennas. @param tx_power: index into self.tx_power_table """ tx_pow_validated = self.get_tx_power(tx_power) logger.debug('tx_pow_validated: %s', tx_pow_validated) needs_update = False for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items(): if self.tx_power[ant] != tx_pow_idx: self.tx_power[ant] = tx_pow_idx needs_update = True logger.debug('tx_power for antenna %s: %s (%s dBm)', ant, tx_pow_idx, tx_pow_dbm) if needs_update and self.state == LLRPClient.STATE_INVENTORYING: logger.debug('changing tx power; will stop politely, then resume') d = self.stopPolitely() d.addCallback(self.startInventory, force_regen_rospec=True)
[ "def", "setTxPower", "(", "self", ",", "tx_power", ")", ":", "tx_pow_validated", "=", "self", ".", "get_tx_power", "(", "tx_power", ")", "logger", ".", "debug", "(", "'tx_pow_validated: %s'", ",", "tx_pow_validated", ")", "needs_update", "=", "False", "for", "...
Set the transmission power for one or more antennas. @param tx_power: index into self.tx_power_table
[ "Set", "the", "transmission", "power", "for", "one", "or", "more", "antennas", "." ]
python
train
spyder-ide/spyder-kernels
spyder_kernels/customize/spydercustomize.py
https://github.com/spyder-ide/spyder-kernels/blob/2c5b36cdb797b8aba77bc406ca96f5e079c4aaca/spyder_kernels/customize/spydercustomize.py#L731-L758
def post_mortem_excepthook(type, value, tb): """ For post mortem exception handling, print a banner and enable post mortem debugging. """ clear_post_mortem() ipython_shell = get_ipython() ipython_shell.showtraceback((type, value, tb)) p = pdb.Pdb(ipython_shell.colors) if not type == SyntaxError: # wait for stderr to print (stderr.flush does not work in this case) time.sleep(0.1) _print('*' * 40) _print('Entering post mortem debugging...') _print('*' * 40) # add ability to move between frames p.send_initial_notification = False p.reset() frame = tb.tb_frame prev = frame while frame.f_back: prev = frame frame = frame.f_back frame = prev # wait for stdout to print time.sleep(0.1) p.interaction(frame, tb)
[ "def", "post_mortem_excepthook", "(", "type", ",", "value", ",", "tb", ")", ":", "clear_post_mortem", "(", ")", "ipython_shell", "=", "get_ipython", "(", ")", "ipython_shell", ".", "showtraceback", "(", "(", "type", ",", "value", ",", "tb", ")", ")", "p", ...
For post mortem exception handling, print a banner and enable post mortem debugging.
[ "For", "post", "mortem", "exception", "handling", "print", "a", "banner", "and", "enable", "post", "mortem", "debugging", "." ]
python
train
IdentityPython/oidcendpoint
src/oidcendpoint/oidc/token.py
https://github.com/IdentityPython/oidcendpoint/blob/6c1d729d51bfb6332816117fe476073df7a1d823/src/oidcendpoint/oidc/token.py#L137-L164
def _post_parse_request(self, request, client_id='', **kwargs): """ This is where clients come to get their access tokens :param request: The request :param authn: Authentication info, comes from HTTP header :returns: """ if 'state' in request: try: sinfo = self.endpoint_context.sdb[request['code']] except KeyError: logger.error('Code not present in SessionDB') return self.error_cls(error="unauthorized_client") else: state = sinfo['authn_req']['state'] if state != request['state']: logger.error('State value mismatch') return self.error_cls(error="unauthorized_client") if "client_id" not in request: # Optional for access token request request["client_id"] = client_id logger.debug("%s: %s" % (request.__class__.__name__, sanitize(request))) return request
[ "def", "_post_parse_request", "(", "self", ",", "request", ",", "client_id", "=", "''", ",", "*", "*", "kwargs", ")", ":", "if", "'state'", "in", "request", ":", "try", ":", "sinfo", "=", "self", ".", "endpoint_context", ".", "sdb", "[", "request", "["...
This is where clients come to get their access tokens :param request: The request :param authn: Authentication info, comes from HTTP header :returns:
[ "This", "is", "where", "clients", "come", "to", "get", "their", "access", "tokens" ]
python
train
benhoff/pluginmanager
pluginmanager/module_manager.py
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/module_manager.py#L122-L131
def add_module_plugin_filters(self, module_plugin_filters): """ Adds `module_plugin_filters` to the internal module filters. May be a single object or an iterable. Every module filters must be a callable and take in a list of plugins and their associated names. """ module_plugin_filters = util.return_list(module_plugin_filters) self.module_plugin_filters.extend(module_plugin_filters)
[ "def", "add_module_plugin_filters", "(", "self", ",", "module_plugin_filters", ")", ":", "module_plugin_filters", "=", "util", ".", "return_list", "(", "module_plugin_filters", ")", "self", ".", "module_plugin_filters", ".", "extend", "(", "module_plugin_filters", ")" ]
Adds `module_plugin_filters` to the internal module filters. May be a single object or an iterable. Every module filters must be a callable and take in a list of plugins and their associated names.
[ "Adds", "module_plugin_filters", "to", "the", "internal", "module", "filters", ".", "May", "be", "a", "single", "object", "or", "an", "iterable", "." ]
python
train
saltstack/salt
salt/modules/boto3_sns.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/boto3_sns.py#L99-L127
def describe_topic(name, region=None, key=None, keyid=None, profile=None): ''' Returns details about a specific SNS topic, specified by name or ARN. CLI example:: salt my_favorite_client boto3_sns.describe_topic a_sns_topic_of_my_choice ''' topics = list_topics(region=region, key=key, keyid=keyid, profile=profile) ret = {} for topic, arn in topics.items(): if name in (topic, arn): ret = {'TopicArn': arn} ret['Attributes'] = get_topic_attributes(arn, region=region, key=key, keyid=keyid, profile=profile) ret['Subscriptions'] = list_subscriptions_by_topic(arn, region=region, key=key, keyid=keyid, profile=profile) # Grab extended attributes for the above subscriptions for sub in range(len(ret['Subscriptions'])): sub_arn = ret['Subscriptions'][sub]['SubscriptionArn'] if not sub_arn.startswith('arn:aws:sns:'): # Sometimes a sub is in e.g. PendingAccept or other # wierd states and doesn't have an ARN yet log.debug('Subscription with invalid ARN %s skipped...', sub_arn) continue deets = get_subscription_attributes(SubscriptionArn=sub_arn, region=region, key=key, keyid=keyid, profile=profile) ret['Subscriptions'][sub].update(deets) return ret
[ "def", "describe_topic", "(", "name", ",", "region", "=", "None", ",", "key", "=", "None", ",", "keyid", "=", "None", ",", "profile", "=", "None", ")", ":", "topics", "=", "list_topics", "(", "region", "=", "region", ",", "key", "=", "key", ",", "k...
Returns details about a specific SNS topic, specified by name or ARN. CLI example:: salt my_favorite_client boto3_sns.describe_topic a_sns_topic_of_my_choice
[ "Returns", "details", "about", "a", "specific", "SNS", "topic", "specified", "by", "name", "or", "ARN", "." ]
python
train
rodricios/eatiht
eatiht/etv2.py
https://github.com/rodricios/eatiht/blob/7341c46327cfe7e0c9f226ef5e9808975c4d43da/eatiht/etv2.py#L209-L235
def get_textnode_subtrees(html_tree, xpath_to_text=TEXT_FINDER_XPATH): """A modification of get_sentence_xpath_tuples: some code was refactored-out, variable names are slightly different. This function does wrap the ltml.tree construction, so a file path, file-like structure, or URL is required. """ try: xpath_finder = html_tree.getroot().getroottree().getpath except(AttributeError): xpath_finder = html_tree.getroottree().getpath nodes_with_text = html_tree.xpath(xpath_to_text) # Within the TextNodeSubTree construction, the ABSL is calculated # refer to eatiht_trees.py parentpaths_textnodes = [TextNodeSubTree(n, xpath_finder(n), n.xpath('.//text()')) for n in nodes_with_text] if len(parentpaths_textnodes) is 0: raise Exception("No text nodes satisfied the xpath:\n\n" + xpath_to_text + "\n\nThis can be due to user's" + " custom xpath, min_str_length value, or both") return parentpaths_textnodes
[ "def", "get_textnode_subtrees", "(", "html_tree", ",", "xpath_to_text", "=", "TEXT_FINDER_XPATH", ")", ":", "try", ":", "xpath_finder", "=", "html_tree", ".", "getroot", "(", ")", ".", "getroottree", "(", ")", ".", "getpath", "except", "(", "AttributeError", "...
A modification of get_sentence_xpath_tuples: some code was refactored-out, variable names are slightly different. This function does wrap the ltml.tree construction, so a file path, file-like structure, or URL is required.
[ "A", "modification", "of", "get_sentence_xpath_tuples", ":", "some", "code", "was", "refactored", "-", "out", "variable", "names", "are", "slightly", "different", ".", "This", "function", "does", "wrap", "the", "ltml", ".", "tree", "construction", "so", "a", "...
python
train
saltstack/salt
salt/cloud/clouds/opennebula.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/cloud/clouds/opennebula.py#L4301-L4384
def vn_release(call=None, kwargs=None): ''' Releases a virtual network lease that was previously on hold. .. versionadded:: 2016.3.0 vn_id The ID of the virtual network from which to release the lease. Can be used instead of ``vn_name``. vn_name The name of the virtual network from which to release the lease. Can be used instead of ``vn_id``. path The path to a file defining the template of the lease to release. Syntax within the file can be the usual attribute=value or XML. Can be used instead of ``data``. data Contains the template defining the lease to release. Syntax can be the usual attribute=value or XML. Can be used instead of ``path``. CLI Example: .. code-block:: bash salt-cloud -f vn_release opennebula vn_id=3 path=/path/to/vn_release_file.txt salt-cloud =f vn_release opennebula vn_name=my-vn data="LEASES=[IP=192.168.0.5]" ''' if call != 'function': raise SaltCloudSystemExit( 'The vn_reserve function must be called with -f or --function.' ) if kwargs is None: kwargs = {} vn_id = kwargs.get('vn_id', None) vn_name = kwargs.get('vn_name', None) path = kwargs.get('path', None) data = kwargs.get('data', None) if vn_id: if vn_name: log.warning( 'Both the \'vn_id\' and \'vn_name\' arguments were provided. ' '\'vn_id\' will take precedence.' ) elif vn_name: vn_id = get_vn_id(kwargs={'name': vn_name}) else: raise SaltCloudSystemExit( 'The vn_release function requires a \'vn_id\' or a \'vn_name\' to ' 'be provided.' ) if data: if path: log.warning( 'Both the \'data\' and \'path\' arguments were provided. ' '\'data\' will take precedence.' ) elif path: with salt.utils.files.fopen(path, mode='r') as rfh: data = rfh.read() else: raise SaltCloudSystemExit( 'The vn_release function requires either \'data\' or a \'path\' to ' 'be provided.' 
) server, user, password = _get_xml_rpc() auth = ':'.join([user, password]) response = server.one.vn.release(auth, int(vn_id), data) ret = { 'action': 'vn.release', 'released': response[0], 'resource_id': response[1], 'error_code': response[2], } return ret
[ "def", "vn_release", "(", "call", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "call", "!=", "'function'", ":", "raise", "SaltCloudSystemExit", "(", "'The vn_reserve function must be called with -f or --function.'", ")", "if", "kwargs", "is", "None", "...
Releases a virtual network lease that was previously on hold. .. versionadded:: 2016.3.0 vn_id The ID of the virtual network from which to release the lease. Can be used instead of ``vn_name``. vn_name The name of the virtual network from which to release the lease. Can be used instead of ``vn_id``. path The path to a file defining the template of the lease to release. Syntax within the file can be the usual attribute=value or XML. Can be used instead of ``data``. data Contains the template defining the lease to release. Syntax can be the usual attribute=value or XML. Can be used instead of ``path``. CLI Example: .. code-block:: bash salt-cloud -f vn_release opennebula vn_id=3 path=/path/to/vn_release_file.txt salt-cloud =f vn_release opennebula vn_name=my-vn data="LEASES=[IP=192.168.0.5]"
[ "Releases", "a", "virtual", "network", "lease", "that", "was", "previously", "on", "hold", "." ]
python
train
buildbot/buildbot
master/buildbot/www/service.py
https://github.com/buildbot/buildbot/blob/5df3cfae6d760557d99156633c32b1822a1e130c/master/buildbot/www/service.py#L122-L134
def uid(self): """uid is now generated automatically according to the claims. This should actually only be used for cookie generation """ exp = datetime.datetime.utcnow() + self.expDelay claims = { 'user_info': self.user_info, # Note that we use JWT standard 'exp' field to implement session expiration # we completely bypass twisted.web session expiration mechanisms 'exp': calendar.timegm(datetime.datetime.timetuple(exp))} return jwt.encode(claims, self.site.session_secret, algorithm=SESSION_SECRET_ALGORITHM)
[ "def", "uid", "(", "self", ")", ":", "exp", "=", "datetime", ".", "datetime", ".", "utcnow", "(", ")", "+", "self", ".", "expDelay", "claims", "=", "{", "'user_info'", ":", "self", ".", "user_info", ",", "# Note that we use JWT standard 'exp' field to implemen...
uid is now generated automatically according to the claims. This should actually only be used for cookie generation
[ "uid", "is", "now", "generated", "automatically", "according", "to", "the", "claims", "." ]
python
train
juju/charm-helpers
charmhelpers/contrib/openstack/utils.py
https://github.com/juju/charm-helpers/blob/aa785c40c3b7a8c69dbfbc7921d6b9f30142e171/charmhelpers/contrib/openstack/utils.py#L1255-L1296
def check_actually_paused(services=None, ports=None): """Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. @param services: See _extract_services_list_helper @returns status, : string for status (None if okay) message : string for problem for status_set """ state = None message = None messages = [] if services is not None: services = _extract_services_list_helper(services) services_running, services_states = _check_running_services(services) if any(services_states): # there shouldn't be any running so this is a problem messages.append("these services running: {}" .format(", ".join( _filter_tuples(services_running, True)))) state = "blocked" ports_open, ports_open_bools = ( _check_listening_on_services_ports(services, True)) if any(ports_open_bools): message_parts = {service: ", ".join([str(v) for v in open_ports]) for service, open_ports in ports_open.items()} message = ", ".join( ["{}: [{}]".format(s, sp) for s, sp in message_parts.items()]) messages.append( "these service:ports are open: {}".format(message)) state = 'blocked' if ports is not None: ports_open, bools = _check_listening_on_ports_list(ports) if any(bools): messages.append( "these ports which should be closed, but are open: {}" .format(", ".join([str(p) for p, v in ports_open if v]))) state = 'blocked' if messages: message = ("Services should be paused but {}" .format(", ".join(messages))) return state, message
[ "def", "check_actually_paused", "(", "services", "=", "None", ",", "ports", "=", "None", ")", ":", "state", "=", "None", "message", "=", "None", "messages", "=", "[", "]", "if", "services", "is", "not", "None", ":", "services", "=", "_extract_services_list...
Check that services listed in the services object and ports are actually closed (not listened to), to verify that the unit is properly paused. @param services: See _extract_services_list_helper @returns status, : string for status (None if okay) message : string for problem for status_set
[ "Check", "that", "services", "listed", "in", "the", "services", "object", "and", "ports", "are", "actually", "closed", "(", "not", "listened", "to", ")", "to", "verify", "that", "the", "unit", "is", "properly", "paused", "." ]
python
train
apache/incubator-heron
heron/instance/src/python/utils/tuple.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/instance/src/python/utils/tuple.py#L62-L74
def make_tuple(stream, tuple_key, values, roots=None): """Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId`` """ component_name = stream.component_name stream_id = stream.id gen_task = roots[0].taskid if roots is not None and len(roots) > 0 else None return HeronTuple(id=str(tuple_key), component=component_name, stream=stream_id, task=gen_task, values=values, creation_time=time.time(), roots=roots)
[ "def", "make_tuple", "(", "stream", ",", "tuple_key", ",", "values", ",", "roots", "=", "None", ")", ":", "component_name", "=", "stream", ".", "component_name", "stream_id", "=", "stream", ".", "id", "gen_task", "=", "roots", "[", "0", "]", ".", "taskid...
Creates a HeronTuple :param stream: protobuf message ``StreamId`` :param tuple_key: tuple id :param values: a list of values :param roots: a list of protobuf message ``RootId``
[ "Creates", "a", "HeronTuple" ]
python
valid
juju/python-libjuju
juju/application.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/application.py#L332-L342
async def set_config(self, config): """Set configuration options for this application. :param config: Dict of configuration to set """ app_facade = client.ApplicationFacade.from_connection(self.connection) log.debug( 'Setting config for %s: %s', self.name, config) return await app_facade.Set(self.name, config)
[ "async", "def", "set_config", "(", "self", ",", "config", ")", ":", "app_facade", "=", "client", ".", "ApplicationFacade", ".", "from_connection", "(", "self", ".", "connection", ")", "log", ".", "debug", "(", "'Setting config for %s: %s'", ",", "self", ".", ...
Set configuration options for this application. :param config: Dict of configuration to set
[ "Set", "configuration", "options", "for", "this", "application", "." ]
python
train
IdentityPython/pysaml2
src/saml2/mdstore.py
https://github.com/IdentityPython/pysaml2/blob/d3aa78eeb7d37c12688f783cb4db1c7263a14ad6/src/saml2/mdstore.py#L388-L428
def certs(self, entity_id, descriptor, use="signing"): ''' Returns certificates for the given Entity ''' ent = self[entity_id] def extract_certs(srvs): res = [] for srv in srvs: if "key_descriptor" in srv: for key in srv["key_descriptor"]: if "use" in key and key["use"] == use: for dat in key["key_info"]["x509_data"]: cert = repack_cert( dat["x509_certificate"]["text"]) if cert not in res: res.append(cert) elif not "use" in key: for dat in key["key_info"]["x509_data"]: cert = repack_cert( dat["x509_certificate"]["text"]) if cert not in res: res.append(cert) return res if descriptor == "any": res = [] for descr in ["spsso", "idpsso", "role", "authn_authority", "attribute_authority", "pdp"]: try: srvs = ent["%s_descriptor" % descr] except KeyError: continue res.extend(extract_certs(srvs)) else: srvs = ent["%s_descriptor" % descriptor] res = extract_certs(srvs) return res
[ "def", "certs", "(", "self", ",", "entity_id", ",", "descriptor", ",", "use", "=", "\"signing\"", ")", ":", "ent", "=", "self", "[", "entity_id", "]", "def", "extract_certs", "(", "srvs", ")", ":", "res", "=", "[", "]", "for", "srv", "in", "srvs", ...
Returns certificates for the given Entity
[ "Returns", "certificates", "for", "the", "given", "Entity" ]
python
train
dmcc/PyStanfordDependencies
StanfordDependencies/StanfordDependencies.py
https://github.com/dmcc/PyStanfordDependencies/blob/43d8f38a19e40087f273330087918c87df6d4d8f/StanfordDependencies/StanfordDependencies.py#L92-L116
def convert_trees(self, ptb_trees, representation='basic', universal=True, include_punct=True, include_erased=False, **kwargs): """Convert a list of Penn Treebank formatted strings (ptb_trees) into Stanford Dependencies. The dependencies are represented as a list of sentences (CoNLL.Corpus), where each sentence (CoNLL.Sentence) is itself a list of CoNLL.Token objects. Currently supported representations are 'basic', 'collapsed', 'CCprocessed', and 'collapsedTree' which behave the same as they in the CoreNLP command line tools. (note that in the online CoreNLP demo, 'collapsed' is called 'enhanced') Additional arguments: universal (if True, use universal dependencies if they're available), include_punct (if False, punctuation tokens will not be included), and include_erased (if False and your representation might erase tokens, those tokens will be omitted from the output). See documentation on your backend to see if it supports further options.""" kwargs.update(representation=representation, universal=universal, include_punct=include_punct, include_erased=include_erased) return Corpus(self.convert_tree(ptb_tree, **kwargs) for ptb_tree in ptb_trees)
[ "def", "convert_trees", "(", "self", ",", "ptb_trees", ",", "representation", "=", "'basic'", ",", "universal", "=", "True", ",", "include_punct", "=", "True", ",", "include_erased", "=", "False", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "update",...
Convert a list of Penn Treebank formatted strings (ptb_trees) into Stanford Dependencies. The dependencies are represented as a list of sentences (CoNLL.Corpus), where each sentence (CoNLL.Sentence) is itself a list of CoNLL.Token objects. Currently supported representations are 'basic', 'collapsed', 'CCprocessed', and 'collapsedTree' which behave the same as they in the CoreNLP command line tools. (note that in the online CoreNLP demo, 'collapsed' is called 'enhanced') Additional arguments: universal (if True, use universal dependencies if they're available), include_punct (if False, punctuation tokens will not be included), and include_erased (if False and your representation might erase tokens, those tokens will be omitted from the output). See documentation on your backend to see if it supports further options.
[ "Convert", "a", "list", "of", "Penn", "Treebank", "formatted", "strings", "(", "ptb_trees", ")", "into", "Stanford", "Dependencies", ".", "The", "dependencies", "are", "represented", "as", "a", "list", "of", "sentences", "(", "CoNLL", ".", "Corpus", ")", "wh...
python
train
NICTA/revrand
revrand/slm.py
https://github.com/NICTA/revrand/blob/4c1881b6c1772d2b988518e49dde954f165acfb6/revrand/slm.py#L219-L244
def predict_moments(self, X): """ Full predictive distribution from Bayesian linear regression. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). Vy : ndarray The expected variance of y* for the query inputs, X* of shape (N*,). """ check_is_fitted(self, ['var_', 'regularizer_', 'weights_', 'covariance_', 'hypers_']) X = check_array(X) Phi = self.basis.transform(X, *atleast_list(self.hypers_)) Ey = Phi.dot(self.weights_) Vf = (Phi.dot(self.covariance_) * Phi).sum(axis=1) return Ey, Vf + self.var_
[ "def", "predict_moments", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "[", "'var_'", ",", "'regularizer_'", ",", "'weights_'", ",", "'covariance_'", ",", "'hypers_'", "]", ")", "X", "=", "check_array", "(", "X", ")", "Phi", "=",...
Full predictive distribution from Bayesian linear regression. Parameters ---------- X : ndarray (N*,d) array query input dataset (N* samples, d dimensions). Returns ------- Ey : ndarray The expected value of y* for the query inputs, X* of shape (N*,). Vy : ndarray The expected variance of y* for the query inputs, X* of shape (N*,).
[ "Full", "predictive", "distribution", "from", "Bayesian", "linear", "regression", "." ]
python
train
numenta/htmresearch
htmresearch/regions/RawSensor.py
https://github.com/numenta/htmresearch/blob/70c096b09a577ea0432c3f3bfff4442d4871b7aa/htmresearch/regions/RawSensor.py#L104-L126
def compute(self, inputs, outputs): """ Get the next record from the queue and encode it. The fields for inputs and outputs are as defined in the spec above. """ if len(self.queue) > 0: # Take the top element of the data queue data = self.queue.pop() else: raise Exception("RawSensor: No data to encode: queue is empty ") # Copy data into output vectors outputs["resetOut"][0] = data["reset"] outputs["sequenceIdOut"][0] = data["sequenceId"] outputs["dataOut"][:] = 0 outputs["dataOut"][data["nonZeros"]] = 1 if self.verbosity > 1: print "RawSensor outputs:" print "sequenceIdOut: ", outputs["sequenceIdOut"] print "resetOut: ", outputs["resetOut"] print "dataOut: ", outputs["dataOut"].nonzero()[0]
[ "def", "compute", "(", "self", ",", "inputs", ",", "outputs", ")", ":", "if", "len", "(", "self", ".", "queue", ")", ">", "0", ":", "# Take the top element of the data queue", "data", "=", "self", ".", "queue", ".", "pop", "(", ")", "else", ":", "raise...
Get the next record from the queue and encode it. The fields for inputs and outputs are as defined in the spec above.
[ "Get", "the", "next", "record", "from", "the", "queue", "and", "encode", "it", ".", "The", "fields", "for", "inputs", "and", "outputs", "are", "as", "defined", "in", "the", "spec", "above", "." ]
python
train
jepegit/cellpy
cellpy/utils/batch_tools/batch_plotters.py
https://github.com/jepegit/cellpy/blob/9f4a84cdd11f72cfa02cda8c2d7b5174abbb7370/cellpy/utils/batch_tools/batch_plotters.py#L464-L483
def run_dumper(self, dumper): """run dumber (once pr. engine) Args: dumper: dumper to run (function or method). The dumper takes the attributes experiments, farms, and barn as input. It does not return anything. But can, if the dumper designer feels in a bad and nasty mood, modify the input objects (for example experiments). """ logging.debug("start dumper::") dumper( experiments=self.experiments, farms=self.farms, barn=self.barn, engine=self.current_engine, ) logging.debug("::dumper ended")
[ "def", "run_dumper", "(", "self", ",", "dumper", ")", ":", "logging", ".", "debug", "(", "\"start dumper::\"", ")", "dumper", "(", "experiments", "=", "self", ".", "experiments", ",", "farms", "=", "self", ".", "farms", ",", "barn", "=", "self", ".", "...
run dumber (once pr. engine) Args: dumper: dumper to run (function or method). The dumper takes the attributes experiments, farms, and barn as input. It does not return anything. But can, if the dumper designer feels in a bad and nasty mood, modify the input objects (for example experiments).
[ "run", "dumber", "(", "once", "pr", ".", "engine", ")" ]
python
train
kovacsbalu/WazeRouteCalculator
WazeRouteCalculator/WazeRouteCalculator.py
https://github.com/kovacsbalu/WazeRouteCalculator/blob/13ddb064571bb2bc0ceec51b5b317640b2bc3fb2/WazeRouteCalculator/WazeRouteCalculator.py#L88-L113
def address_to_coords(self, address): """Convert address to coordinates""" base_coords = self.BASE_COORDS[self.region] get_cord = self.COORD_SERVERS[self.region] url_options = { "q": address, "lang": "eng", "origin": "livemap", "lat": base_coords["lat"], "lon": base_coords["lon"] } response = requests.get(self.WAZE_URL + get_cord, params=url_options, headers=self.HEADERS) for response_json in response.json(): if response_json.get('city'): lat = response_json['location']['lat'] lon = response_json['location']['lon'] bounds = response_json['bounds'] # sometimes the coords don't match up if bounds is not None: bounds['top'], bounds['bottom'] = max(bounds['top'], bounds['bottom']), min(bounds['top'], bounds['bottom']) bounds['left'], bounds['right'] = min(bounds['left'], bounds['right']), max(bounds['left'], bounds['right']) else: bounds = {} return {"lat": lat, "lon": lon, "bounds": bounds} raise WRCError("Cannot get coords for %s" % address)
[ "def", "address_to_coords", "(", "self", ",", "address", ")", ":", "base_coords", "=", "self", ".", "BASE_COORDS", "[", "self", ".", "region", "]", "get_cord", "=", "self", ".", "COORD_SERVERS", "[", "self", ".", "region", "]", "url_options", "=", "{", "...
Convert address to coordinates
[ "Convert", "address", "to", "coordinates" ]
python
train
bhmm/bhmm
bhmm/output_models/discrete.py
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/output_models/discrete.py#L130-L157
def p_obs(self, obs, out=None): """ Returns the output probabilities for an entire trajectory and all hidden states Parameters ---------- obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the probability of generating the symbol at time point t from any of the N hidden states """ if out is None: out = self._output_probabilities[:, obs].T # out /= np.sum(out, axis=1)[:,None] return self._handle_outliers(out) else: if obs.shape[0] == out.shape[0]: np.copyto(out, self._output_probabilities[:, obs].T) elif obs.shape[0] < out.shape[0]: out[:obs.shape[0], :] = self._output_probabilities[:, obs].T else: raise ValueError('output array out is too small: '+str(out.shape[0])+' < '+str(obs.shape[0])) # out /= np.sum(out, axis=1)[:,None] return self._handle_outliers(out)
[ "def", "p_obs", "(", "self", ",", "obs", ",", "out", "=", "None", ")", ":", "if", "out", "is", "None", ":", "out", "=", "self", ".", "_output_probabilities", "[", ":", ",", "obs", "]", ".", "T", "# out /= np.sum(out, axis=1)[:,None]", "return", "self", ...
Returns the output probabilities for an entire trajectory and all hidden states Parameters ---------- obs : ndarray((T), dtype=int) a discrete trajectory of length T Return ------ p_o : ndarray (T,N) the probability of generating the symbol at time point t from any of the N hidden states
[ "Returns", "the", "output", "probabilities", "for", "an", "entire", "trajectory", "and", "all", "hidden", "states" ]
python
train
LonamiWebs/Telethon
telethon/tl/custom/chatgetter.py
https://github.com/LonamiWebs/Telethon/blob/1ead9757d366b58c1e0567cddb0196e20f1a445f/telethon/tl/custom/chatgetter.py#L27-L40
async def get_chat(self): """ Returns `chat`, but will make an API call to find the chat unless it's already cached. """ # See `get_sender` for information about 'min'. if (self._chat is None or getattr(self._chat, 'min', None))\ and await self.get_input_chat(): try: self._chat =\ await self._client.get_entity(self._input_chat) except ValueError: await self._refetch_chat() return self._chat
[ "async", "def", "get_chat", "(", "self", ")", ":", "# See `get_sender` for information about 'min'.", "if", "(", "self", ".", "_chat", "is", "None", "or", "getattr", "(", "self", ".", "_chat", ",", "'min'", ",", "None", ")", ")", "and", "await", "self", "....
Returns `chat`, but will make an API call to find the chat unless it's already cached.
[ "Returns", "chat", "but", "will", "make", "an", "API", "call", "to", "find", "the", "chat", "unless", "it", "s", "already", "cached", "." ]
python
train