repo
stringlengths
7
55
path
stringlengths
4
223
url
stringlengths
87
315
code
stringlengths
75
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
avg_line_len
float64
7.91
980
formwork-io/lazarus
lazarus/__init__.py
https://github.com/formwork-io/lazarus/blob/b2b6120fe06d69c23b4f41d55b6d71860a9fdeaa/lazarus/__init__.py#L119-L210
def default(restart_cb=None, restart_func=None, close_fds=True):
    '''Set up lazarus in default mode.

    See the :py:func:`custom` function for a more powerful mode of use.
    The default mode of lazarus is to watch all modules rooted at
    ``PYTHONPATH`` for changes and restart when they take place.

    Keyword arguments:
    restart_cb -- Callback invoked prior to restarting the process; allows
        for any cleanup to occur prior to restarting. Returning anything
        other than *None* in the callback will cancel the restart.
    restart_func -- Function invoked to restart the process. This supplants
        the default behavior of using *sys.executable* and *sys.argv*.
    close_fds -- Whether all file descriptors other than *stdin*, *stdout*,
        and *stderr* should be closed

    A simple example:
    >>> import lazarus
    >>> lazarus.default()
    >>> lazarus.stop()
    '''
    # Refuse to double-activate.
    if _active:
        raise RuntimeWarning('lazarus is already active')

    _python_path = os.getenv('PYTHONPATH')
    if not _python_path:
        raise RuntimeError('PYTHONPATH is not set')
    if restart_cb and not callable(restart_cb):
        raise TypeError('restart_cb keyword argument is not callable')
    if restart_func and not callable(restart_func):
        raise TypeError('restart_func keyword argument is not callable')

    global _close_fds
    _close_fds = close_fds

    # watchdog is an optional dependency; surface a clear error if absent.
    try:
        from watchdog.observers import Observer
        from watchdog.events import FileSystemEventHandler
    except ImportError as ie:
        raise RuntimeError('no watchdog support (%s)' % str(ie))

    class _Handler(FileSystemEventHandler):
        def __init__(self):
            self.active = True

        def dispatch(self, event):
            # Once a restart has been committed to, ignore further events.
            if self.active:
                super(_Handler, self).dispatch(event)

        def all_events(self, event):
            if is_restart_event(event):
                cancelled = _restart()
                if not cancelled:
                    self.active = False

        # Every event type funnels into the same restart check.
        on_created = all_events
        on_deleted = all_events
        on_modified = all_events
        on_moved = all_events

    global _restart_cb
    _restart_cb = restart_cb
    global _restart_func
    _restart_func = restart_func

    global _observer
    _observer = Observer()
    _observer.schedule(_Handler(), _python_path, recursive=True)
    _activate()
    _observer.start()
[ "def", "default", "(", "restart_cb", "=", "None", ",", "restart_func", "=", "None", ",", "close_fds", "=", "True", ")", ":", "if", "_active", ":", "msg", "=", "'lazarus is already active'", "raise", "RuntimeWarning", "(", "msg", ")", "_python_path", "=", "os...
Sets up lazarus in default mode. See the :py:func:`custom` function for a more powerful mode of use. The default mode of lazarus is to watch all modules rooted at ``PYTHONPATH`` for changes and restart when they take place. Keyword arguments: restart_cb -- Callback invoked prior to restarting the process; allows for any cleanup to occur prior to restarting. Returning anything other than *None* in the callback will cancel the restart. restart_func -- Function invoked to restart the process. This supplants the default behavior of using *sys.executable* and *sys.argv*. close_fds -- Whether all file descriptors other than *stdin*, *stdout*, and *stderr* should be closed A simple example: >>> import lazarus >>> lazarus.default() >>> lazarus.stop()
[ "Sets", "up", "lazarus", "in", "default", "mode", "." ]
python
train
28.858696
juiceinc/recipe
recipe/shelf.py
https://github.com/juiceinc/recipe/blob/2e60c2242aeaea3029a2274b31bc3a937761e568/recipe/shelf.py#L774-L784
def introspect_table(table): """Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config. """ d = {} for c in table.columns: if isinstance(c.type, String): d[c.name] = {'kind': 'Dimension', 'field': c.name} if isinstance(c.type, (Integer, Float)): d[c.name] = {'kind': 'Metric', 'field': c.name} return d
[ "def", "introspect_table", "(", "table", ")", ":", "d", "=", "{", "}", "for", "c", "in", "table", ".", "columns", ":", "if", "isinstance", "(", "c", ".", "type", ",", "String", ")", ":", "d", "[", "c", ".", "name", "]", "=", "{", "'kind'", ":",...
Given a SQLAlchemy Table object, return a Shelf description suitable for passing to Shelf.from_config.
[ "Given", "a", "SQLAlchemy", "Table", "object", "return", "a", "Shelf", "description", "suitable", "for", "passing", "to", "Shelf", ".", "from_config", "." ]
python
train
36.636364
FactoryBoy/factory_boy
factory/base.py
https://github.com/FactoryBoy/factory_boy/blob/edaa7c7f5a14065b229927903bd7989cc93cd069/factory/base.py#L342-L366
def _check_parameter_dependencies(self, parameters): """Find out in what order parameters should be called.""" # Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies. # deep_revdeps: set of fields a field depend indirectly upon deep_revdeps = collections.defaultdict(set) # Actual, direct dependencies deps = collections.defaultdict(set) for name, parameter in parameters.items(): if isinstance(parameter, declarations.Parameter): field_revdeps = parameter.get_revdeps(parameters) if not field_revdeps: continue deep_revdeps[name] = set.union(*(deep_revdeps[dep] for dep in field_revdeps)) deep_revdeps[name] |= set(field_revdeps) for dep in field_revdeps: deps[dep].add(name) # Check for cyclical dependencies cyclic = [name for name, field_deps in deep_revdeps.items() if name in field_deps] if cyclic: raise errors.CyclicDefinitionError( "Cyclic definition detected on %r; Params around %s" % (self.factory, ', '.join(cyclic))) return deps
[ "def", "_check_parameter_dependencies", "(", "self", ",", "parameters", ")", ":", "# Warning: parameters only provide reverse dependencies; we reverse them into standard dependencies.", "# deep_revdeps: set of fields a field depend indirectly upon", "deep_revdeps", "=", "collections", ".",...
Find out in what order parameters should be called.
[ "Find", "out", "in", "what", "order", "parameters", "should", "be", "called", "." ]
python
train
49.16
icometrix/dicom2nifti
dicom2nifti/compressed_dicom.py
https://github.com/icometrix/dicom2nifti/blob/1462ae5dd979fa3f276fe7a78ceb9b028121536f/dicom2nifti/compressed_dicom.py#L119-L127
def _decompress_dicom(dicom_file, output_file): """ This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress """ gdcmconv_executable = _get_gdcmconv() subprocess.check_output([gdcmconv_executable, '-w', dicom_file, output_file])
[ "def", "_decompress_dicom", "(", "dicom_file", ",", "output_file", ")", ":", "gdcmconv_executable", "=", "_get_gdcmconv", "(", ")", "subprocess", ".", "check_output", "(", "[", "gdcmconv_executable", ",", "'-w'", ",", "dicom_file", ",", "output_file", "]", ")" ]
This function can be used to convert a jpeg compressed image to an uncompressed one for further conversion :param input_file: single dicom file to decompress
[ "This", "function", "can", "be", "used", "to", "convert", "a", "jpeg", "compressed", "image", "to", "an", "uncompressed", "one", "for", "further", "conversion" ]
python
train
38.555556
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L2126-L2147
def iter(self, start=0, stop=-1, withscores=False, reverse=None): """ Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs """ reverse = reverse if reverse is not None else self.reversed _loads = self._loads for member in self._client.zrange( self.key_prefix, start=start, end=stop, withscores=withscores, desc=reverse, score_cast_func=self.cast): if withscores: yield (_loads(member[0]), self.cast(member[1])) else: yield _loads(member)
[ "def", "iter", "(", "self", ",", "start", "=", "0", ",", "stop", "=", "-", "1", ",", "withscores", "=", "False", ",", "reverse", "=", "None", ")", ":", "reverse", "=", "reverse", "if", "reverse", "is", "not", "None", "else", "self", ".", "reversed"...
Return a range of values from sorted set name between @start and @end sorted in ascending order unless @reverse or :prop:reversed. @start and @end: #int, can be negative, indicating the end of the range. @withscores: #bool indicates to return the scores along with the members, as a list of |(member, score)| pairs @reverse: #bool indicating whether to sort the results descendingly -> yields members or |(member, score)| #tuple pairs
[ "Return", "a", "range", "of", "values", "from", "sorted", "set", "name", "between", "@start", "and", "@end", "sorted", "in", "ascending", "order", "unless", "@reverse", "or", ":", "prop", ":", "reversed", "." ]
python
train
46.454545
lrq3000/pyFileFixity
pyFileFixity/lib/profilers/visual/pympler/web.py
https://github.com/lrq3000/pyFileFixity/blob/fd5ef23bb13835faf1e3baa773619b86a1cc9bdf/pyFileFixity/lib/profilers/visual/pympler/web.py#L107-L111
def process(): """Get process overview.""" pmi = ProcessMemoryInfo() threads = get_current_threads() return dict(info=pmi, threads=threads)
[ "def", "process", "(", ")", ":", "pmi", "=", "ProcessMemoryInfo", "(", ")", "threads", "=", "get_current_threads", "(", ")", "return", "dict", "(", "info", "=", "pmi", ",", "threads", "=", "threads", ")" ]
Get process overview.
[ "Get", "process", "overview", "." ]
python
train
30.2
pypa/pipenv
pipenv/vendor/requests/cookies.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/requests/cookies.py#L201-L216
def set(self, name, value, **kwargs): """Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains. """ # support client code that unsets cookies by assignment of a None value: if value is None: remove_cookie_by_name(self, name, domain=kwargs.get('domain'), path=kwargs.get('path')) return if isinstance(value, Morsel): c = morsel_to_cookie(value) else: c = create_cookie(name, value, **kwargs) self.set_cookie(c) return c
[ "def", "set", "(", "self", ",", "name", ",", "value", ",", "*", "*", "kwargs", ")", ":", "# support client code that unsets cookies by assignment of a None value:", "if", "value", "is", "None", ":", "remove_cookie_by_name", "(", "self", ",", "name", ",", "domain",...
Dict-like set() that also supports optional domain and path args in order to resolve naming collisions from using one cookie jar over multiple domains.
[ "Dict", "-", "like", "set", "()", "that", "also", "supports", "optional", "domain", "and", "path", "args", "in", "order", "to", "resolve", "naming", "collisions", "from", "using", "one", "cookie", "jar", "over", "multiple", "domains", "." ]
python
train
39.3125
thombashi/SimpleSQLite
simplesqlite/core.py
https://github.com/thombashi/SimpleSQLite/blob/b16f212132b9b98773e68bf7395abc2f60f56fe5/simplesqlite/core.py#L447-L469
def select_as_dict(self, table_name, columns=None, where=None, extra=None): """ Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. :rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict` """ return self.select_as_tabledata(table_name, columns, where, extra).as_dict().get(table_name)
[ "def", "select_as_dict", "(", "self", ",", "table_name", ",", "columns", "=", "None", ",", "where", "=", "None", ",", "extra", "=", "None", ")", ":", "return", "self", ".", "select_as_tabledata", "(", "table_name", ",", "columns", ",", "where", ",", "ext...
Get data in the database and return fetched data as a |OrderedDict| list. :param str table_name: |arg_select_table_name| :param list columns: |arg_select_as_xx_columns| :param where: |arg_select_where| :type where: |arg_where_type| :param str extra: |arg_select_extra| :return: Table data as |OrderedDict| instances. :rtype: |list| of |OrderedDict| :raises simplesqlite.NullDatabaseConnectionError: |raises_check_connection| :raises simplesqlite.TableNotFoundError: |raises_verify_table_existence| :raises simplesqlite.OperationalError: |raises_operational_error| :Example: :ref:`example-select-as-dict`
[ "Get", "data", "in", "the", "database", "and", "return", "fetched", "data", "as", "a", "|OrderedDict|", "list", "." ]
python
train
40.217391
opengisch/pum
pum/core/upgrader.py
https://github.com/opengisch/pum/blob/eaf6af92d723ace60b9e982d7f69b98e00606959/pum/core/upgrader.py#L156-L173
def __run_pre_all(self): """Execute the pre-all.py and pre-all.sql files if they exist""" # if the list of delta dirs is [delta1, delta2] the pre scripts of delta2 are # executed before the pre scripts of delta1 for d in reversed(self.dirs): pre_all_py_path = os.path.join(d, 'pre-all.py') if os.path.isfile(pre_all_py_path): print(' Applying pre-all.py...', end=' ') self.__run_py_file(pre_all_py_path, 'pre-all') print('OK') pre_all_sql_path = os.path.join(d, 'pre-all.sql') if os.path.isfile(pre_all_sql_path): print(' Applying pre-all.sql...', end=' ') self.__run_sql_file(pre_all_sql_path) print('OK')
[ "def", "__run_pre_all", "(", "self", ")", ":", "# if the list of delta dirs is [delta1, delta2] the pre scripts of delta2 are", "# executed before the pre scripts of delta1", "for", "d", "in", "reversed", "(", "self", ".", "dirs", ")", ":", "pre_all_py_path", "=", "os", "."...
Execute the pre-all.py and pre-all.sql files if they exist
[ "Execute", "the", "pre", "-", "all", ".", "py", "and", "pre", "-", "all", ".", "sql", "files", "if", "they", "exist" ]
python
train
43.111111
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/gloo/glir.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/gloo/glir.py#L705-L731
def _get_error(self, code, errors, indentation=0): """Get error and show the faulty line + some context Other GLIR implementations may omit this. """ # Init results = [] lines = None if code is not None: lines = [line.strip() for line in code.split('\n')] for error in errors.split('\n'): # Strip; skip empy lines error = error.strip() if not error: continue # Separate line number from description (if we can) linenr, error = self._parse_error(error) if None in (linenr, lines): results.append('%s' % error) else: results.append('on line %i: %s' % (linenr, error)) if linenr > 0 and linenr < len(lines): results.append(' %s' % lines[linenr - 1]) # Add indentation and return results = [' ' * indentation + r for r in results] return '\n'.join(results)
[ "def", "_get_error", "(", "self", ",", "code", ",", "errors", ",", "indentation", "=", "0", ")", ":", "# Init", "results", "=", "[", "]", "lines", "=", "None", "if", "code", "is", "not", "None", ":", "lines", "=", "[", "line", ".", "strip", "(", ...
Get error and show the faulty line + some context Other GLIR implementations may omit this.
[ "Get", "error", "and", "show", "the", "faulty", "line", "+", "some", "context", "Other", "GLIR", "implementations", "may", "omit", "this", "." ]
python
train
36.925926
DataONEorg/d1_python
client_cli/src/d1_cli/impl/command_parser.py
https://github.com/DataONEorg/d1_python/blob/3ac4d4f3ca052d3e8641a6a329cab526c8ddcb0d/client_cli/src/d1_cli/impl/command_parser.py#L419-L428
def do_archive(self, line): """archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.""" pids = self._split_args(line, 1, -1) self._command_processor.science_object_archive(pids) self._print_info_if_verbose( "Added archive operation for identifier(s) {} to write queue".format( ", ".join(pids) ) )
[ "def", "do_archive", "(", "self", ",", "line", ")", ":", "pids", "=", "self", ".", "_split_args", "(", "line", ",", "1", ",", "-", "1", ")", "self", ".", "_command_processor", ".", "science_object_archive", "(", "pids", ")", "self", ".", "_print_info_if_...
archive <identifier> [identifier ...] Mark one or more existing Science Objects as archived.
[ "archive", "<identifier", ">", "[", "identifier", "...", "]", "Mark", "one", "or", "more", "existing", "Science", "Objects", "as", "archived", "." ]
python
train
41.4
hotdoc/hotdoc
hotdoc/extensions/c/clang/cindex.py
https://github.com/hotdoc/hotdoc/blob/1067cdc8482b585b364a38fb52ca5d904e486280/hotdoc/extensions/c/clang/cindex.py#L1641-L1646
def lexical_parent(self): """Return the lexical parent for this cursor.""" if not hasattr(self, '_lexical_parent'): self._lexical_parent = conf.lib.clang_getCursorLexicalParent(self) return self._lexical_parent
[ "def", "lexical_parent", "(", "self", ")", ":", "if", "not", "hasattr", "(", "self", ",", "'_lexical_parent'", ")", ":", "self", ".", "_lexical_parent", "=", "conf", ".", "lib", ".", "clang_getCursorLexicalParent", "(", "self", ")", "return", "self", ".", ...
Return the lexical parent for this cursor.
[ "Return", "the", "lexical", "parent", "for", "this", "cursor", "." ]
python
train
40.333333
OCA/knowledge
document_page_approval/models/document_page_history.py
https://github.com/OCA/knowledge/blob/77fa06019c989b56ce34839e9f6343577184223a/document_page_approval/models/document_page_history.py#L95-L129
def action_approve(self): """Set a change request as approved.""" for rec in self: if rec.state not in ['draft', 'to approve']: raise UserError( _("Can't approve page in '%s' state.") % rec.state) if not rec.am_i_approver: raise UserError(_( 'You are not authorized to do this.\r\n' 'Only approvers with these groups can approve this: ' ) % ', '.join( [g.display_name for g in rec.page_id.approver_group_ids])) # Update state rec.write({ 'state': 'approved', 'approved_date': fields.datetime.now(), 'approved_uid': self.env.uid, }) # Trigger computed field update rec.page_id._compute_history_head() # Notify state change rec.message_post( subtype='mt_comment', body=_( 'Change request has been approved by %s.' ) % (self.env.user.name) ) # Notify followers a new version is available rec.page_id.message_post( subtype='mt_comment', body=_( 'New version of the document %s approved.' ) % (rec.page_id.name) )
[ "def", "action_approve", "(", "self", ")", ":", "for", "rec", "in", "self", ":", "if", "rec", ".", "state", "not", "in", "[", "'draft'", ",", "'to approve'", "]", ":", "raise", "UserError", "(", "_", "(", "\"Can't approve page in '%s' state.\"", ")", "%", ...
Set a change request as approved.
[ "Set", "a", "change", "request", "as", "approved", "." ]
python
train
40.057143
googleapis/google-cloud-python
bigtable/google/cloud/bigtable/row_set.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/bigtable/google/cloud/bigtable/row_set.py#L112-L123
def _update_message_request(self, message): """Add row keys and row range to given request message :type message: class:`data_messages_v2_pb2.ReadRowsRequest` :param message: The ``ReadRowsRequest`` protobuf """ for each in self.row_keys: message.rows.row_keys.append(_to_bytes(each)) for each in self.row_ranges: r_kwrags = each.get_range_kwargs() message.rows.row_ranges.add(**r_kwrags)
[ "def", "_update_message_request", "(", "self", ",", "message", ")", ":", "for", "each", "in", "self", ".", "row_keys", ":", "message", ".", "rows", ".", "row_keys", ".", "append", "(", "_to_bytes", "(", "each", ")", ")", "for", "each", "in", "self", "....
Add row keys and row range to given request message :type message: class:`data_messages_v2_pb2.ReadRowsRequest` :param message: The ``ReadRowsRequest`` protobuf
[ "Add", "row", "keys", "and", "row", "range", "to", "given", "request", "message" ]
python
train
38.583333
lmjohns3/theanets
theanets/trainer.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/trainer.py#L93-L158
def itertrain(self, train, valid=None, **kwargs):
    '''Train a model using a training and validation set.

    This method yields a series of monitor values to the caller. After
    every iteration, a pair of monitor dictionaries is generated: one
    evaluated on the training dataset, and another evaluated on the
    validation dataset. The validation monitors might not be updated
    during every training iteration; in this case, the most recent
    validation monitors will be yielded along with the training
    monitors.

    Parameters
    ----------
    train : :class:`Dataset <theanets.dataset.Dataset>`
        A set of training data for computing updates to model parameters.
    valid : :class:`Dataset <theanets.dataset.Dataset>`
        A set of validation data for computing monitor values and
        determining when the loss has stopped improving.

    Yields
    ------
    training : dict
        A dictionary mapping monitor names to values, evaluated on the
        training dataset.
    validation : dict
        A dictionary containing monitor values evaluated on the
        validation dataset.
    '''
    ifci = itertools.chain.from_iterable

    def first(x):
        return x[0] if isinstance(x, (tuple, list)) else x

    def last(x):
        return x[-1] if isinstance(x, (tuple, list)) else x

    # Infer input/output dimensionality from the training batches.
    odim = idim = None
    for t in train:
        idim = first(t).shape[-1]
        odim = last(t).shape[-1]

    rng = kwargs.get('rng')
    if rng is None or isinstance(rng, int):
        rng = np.random.RandomState(rng)

    # set output (decoding) weights on the network.
    samples = ifci(last(t) for t in train)
    for param in self.network.layers[-1].params:
        shape = param.get_value(borrow=True).shape
        if len(shape) == 2 and shape[1] == odim:
            arr = np.vstack(SampleTrainer.reservoir(samples, shape[0], rng))
            util.log('setting {}: {}', param.name, shape)
            param.set_value(arr / np.sqrt((arr * arr).sum(axis=1))[:, None])

    # set input (encoding) weights on the network.
    samples = ifci(first(t) for t in train)
    # BUG FIX: the generator below indexes feed_forward output with
    # `i - 1`, but `i` was never bound in this scope (NameError once the
    # generator is consumed).  Bind it as the layer index via enumerate,
    # and refresh `samples` inside the matching branch so subsequent
    # layers sample from the previous layer's activations.
    for i, layer in enumerate(self.network.layers):
        for param in layer.params:
            shape = param.get_value(borrow=True).shape
            if len(shape) == 2 and shape[0] == idim:
                arr = np.vstack(
                    SampleTrainer.reservoir(samples, shape[1], rng)).T
                util.log('setting {}: {}', param.name, shape)
                param.set_value(arr / np.sqrt((arr * arr).sum(axis=0)))
                samples = ifci(self.network.feed_forward(
                    first(t))[i - 1] for t in train)

    yield dict(loss=0), dict(loss=0)
[ "def", "itertrain", "(", "self", ",", "train", ",", "valid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "ifci", "=", "itertools", ".", "chain", ".", "from_iterable", "def", "first", "(", "x", ")", ":", "return", "x", "[", "0", "]", "if", "is...
Train a model using a training and validation set. This method yields a series of monitor values to the caller. After every iteration, a pair of monitor dictionaries is generated: one evaluated on the training dataset, and another evaluated on the validation dataset. The validation monitors might not be updated during every training iteration; in this case, the most recent validation monitors will be yielded along with the training monitors. Parameters ---------- train : :class:`Dataset <theanets.dataset.Dataset>` A set of training data for computing updates to model parameters. valid : :class:`Dataset <theanets.dataset.Dataset>` A set of validation data for computing monitor values and determining when the loss has stopped improving. Yields ------ training : dict A dictionary mapping monitor names to values, evaluated on the training dataset. validation : dict A dictionary containing monitor values evaluated on the validation dataset.
[ "Train", "a", "model", "using", "a", "training", "and", "validation", "set", "." ]
python
test
42.484848
tmux-python/libtmux
libtmux/window.py
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/window.py#L153-L187
def set_window_option(self, option, value): """ Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption` """ self.server._update_windows() if isinstance(value, bool) and value: value = 'on' elif isinstance(value, bool) and not value: value = 'off' cmd = self.cmd( 'set-window-option', '-t%s:%s' % (self.get('session_id'), self.index), # '-t%s' % self.id, option, value, ) if isinstance(cmd.stderr, list) and len(cmd.stderr): handle_option_error(cmd.stderr[0])
[ "def", "set_window_option", "(", "self", ",", "option", ",", "value", ")", ":", "self", ".", "server", ".", "_update_windows", "(", ")", "if", "isinstance", "(", "value", ",", "bool", ")", "and", "value", ":", "value", "=", "'on'", "elif", "isinstance", ...
Wrapper for ``$ tmux set-window-option <option> <value>``. Parameters ---------- option : str option to set, e.g. 'aggressive-resize' value : str window option value. True/False will turn in 'on' and 'off', also accepts string of 'on' or 'off' directly. Raises ------ :exc:`exc.OptionError`, :exc:`exc.UnknownOption`, :exc:`exc.InvalidOption`, :exc:`exc.AmbiguousOption`
[ "Wrapper", "for", "$", "tmux", "set", "-", "window", "-", "option", "<option", ">", "<value", ">", "." ]
python
train
28.942857
tariqdaouda/pyGeno
pyGeno/Exon.py
https://github.com/tariqdaouda/pyGeno/blob/474b1250bf78ce5c7e7c3bbbfdbad9635d5a7d14/pyGeno/Exon.py#L152-L161
def previousExon(self) : """Returns the previous exon of the transcript, or None if there is none""" if self.number == 0 : return None try : return self.transcript.exons[self.number-1] except IndexError : return None
[ "def", "previousExon", "(", "self", ")", ":", "if", "self", ".", "number", "==", "0", ":", "return", "None", "try", ":", "return", "self", ".", "transcript", ".", "exons", "[", "self", ".", "number", "-", "1", "]", "except", "IndexError", ":", "retur...
Returns the previous exon of the transcript, or None if there is none
[ "Returns", "the", "previous", "exon", "of", "the", "transcript", "or", "None", "if", "there", "is", "none" ]
python
train
23
jenanwise/codequality
codequality/main.py
https://github.com/jenanwise/codequality/blob/8a2bd767fd73091c49a5318fdbfb2b4fff77533d/codequality/main.py#L203-L210
def _should_ignore(self, path): """ Return True iff path should be ignored. """ for ignore in self.options.ignores: if fnmatch.fnmatch(path, ignore): return True return False
[ "def", "_should_ignore", "(", "self", ",", "path", ")", ":", "for", "ignore", "in", "self", ".", "options", ".", "ignores", ":", "if", "fnmatch", ".", "fnmatch", "(", "path", ",", "ignore", ")", ":", "return", "True", "return", "False" ]
Return True iff path should be ignored.
[ "Return", "True", "iff", "path", "should", "be", "ignored", "." ]
python
train
29.375
Fantomas42/django-blog-zinnia
zinnia/sitemaps.py
https://github.com/Fantomas42/django-blog-zinnia/blob/b4949304b104a8e1a7a7a0773cbfd024313c3a15/zinnia/sitemaps.py#L96-L101
def priority(self, item): """ The priority of the item depends of the number of entries published in the cache divided by the maximum of entries. """ return '%.1f' % max(self.cache[item.pk][0] / self.max_entries, 0.1)
[ "def", "priority", "(", "self", ",", "item", ")", ":", "return", "'%.1f'", "%", "max", "(", "self", ".", "cache", "[", "item", ".", "pk", "]", "[", "0", "]", "/", "self", ".", "max_entries", ",", "0.1", ")" ]
The priority of the item depends of the number of entries published in the cache divided by the maximum of entries.
[ "The", "priority", "of", "the", "item", "depends", "of", "the", "number", "of", "entries", "published", "in", "the", "cache", "divided", "by", "the", "maximum", "of", "entries", "." ]
python
train
42
aio-libs/aioftp
aioftp/common.py
https://github.com/aio-libs/aioftp/blob/b45395b1aba41301b898040acade7010e6878a08/aioftp/common.py#L489-L499
async def read(self, count=-1): """ :py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy """ await self.wait("read") start = _now() data = await super().read(count) self.append("read", data, start) return data
[ "async", "def", "read", "(", "self", ",", "count", "=", "-", "1", ")", ":", "await", "self", ".", "wait", "(", "\"read\"", ")", "start", "=", "_now", "(", ")", "data", "=", "await", "super", "(", ")", ".", "read", "(", "count", ")", "self", "."...
:py:func:`asyncio.coroutine` :py:meth:`aioftp.StreamIO.read` proxy
[ ":", "py", ":", "func", ":", "asyncio", ".", "coroutine" ]
python
valid
26
ceph/ceph-deploy
vendor.py
https://github.com/ceph/ceph-deploy/blob/86943fcc454cd4c99a86e3493e9e93a59c661fef/vendor.py#L93-L112
def vendorize(vendor_requirements): """ This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ] """ for library in vendor_requirements: if len(library) == 2: name, version = library cmd = None elif len(library) == 3: # a possible cmd we need to run name, version, cmd = library vendor_library(name, version, cmd)
[ "def", "vendorize", "(", "vendor_requirements", ")", ":", "for", "library", "in", "vendor_requirements", ":", "if", "len", "(", "library", ")", "==", "2", ":", "name", ",", "version", "=", "library", "cmd", "=", "None", "elif", "len", "(", "library", ")"...
This is the main entry point for vendorizing requirements. It expects a list of tuples that should contain the name of the library and the version. For example, a library ``foo`` with version ``0.0.1`` would look like:: vendor_requirements = [ ('foo', '0.0.1'), ]
[ "This", "is", "the", "main", "entry", "point", "for", "vendorizing", "requirements", ".", "It", "expects", "a", "list", "of", "tuples", "that", "should", "contain", "the", "name", "of", "the", "library", "and", "the", "version", "." ]
python
train
31.05
Diaoul/subliminal
subliminal/providers/addic7ed.py
https://github.com/Diaoul/subliminal/blob/a952dfb2032eb0fd6eb1eb89f04080923c11c4cf/subliminal/providers/addic7ed.py#L148-L184
def _search_show_id(self, series, year=None): """Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int """ # addic7ed doesn't support search with quotes series = series.replace('\'', ' ') # build the params series_year = '%s %d' % (series, year) if year is not None else series params = {'search': series_year, 'Submit': 'Search'} # make the search logger.info('Searching show ids with %r', params) r = self.session.get(self.server_url + 'search.php', params=params, timeout=10) r.raise_for_status() if r.status_code == 304: raise TooManyRequests() soup = ParserBeautifulSoup(r.content, ['lxml', 'html.parser']) # get the suggestion suggestion = soup.select('span.titulo > a[href^="/show/"]') if not suggestion: logger.warning('Show id not found: no suggestion') return None if not sanitize(suggestion[0].i.text.replace('\'', ' ')) == sanitize(series_year): logger.warning('Show id not found: suggestion does not match') return None show_id = int(suggestion[0]['href'][6:]) logger.debug('Found show id %d', show_id) return show_id
[ "def", "_search_show_id", "(", "self", ",", "series", ",", "year", "=", "None", ")", ":", "# addic7ed doesn't support search with quotes", "series", "=", "series", ".", "replace", "(", "'\\''", ",", "' '", ")", "# build the params", "series_year", "=", "'%s %d'", ...
Search the show id from the `series` and `year`. :param str series: series of the episode. :param year: year of the series, if any. :type year: int :return: the show id, if found. :rtype: int
[ "Search", "the", "show", "id", "from", "the", "series", "and", "year", "." ]
python
train
37.783784
rodynnz/xccdf
src/xccdf/models/description.py
https://github.com/rodynnz/xccdf/blob/1b9dc2f06b5cce8db2a54c5f95a8f6bcf5cb6981/src/xccdf/models/description.py#L51-L67
def update_xml_element(self): """ Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element """ super(Description, self).update_xml_element() if hasattr(self, 'lang'): self.xml_element.set( '{http://www.w3.org/XML/1998/namespace}lang', self.lang) if hasattr(self, 'override'): self.xml_element.set('override', str(self.override)) return self.xml_element
[ "def", "update_xml_element", "(", "self", ")", ":", "super", "(", "Description", ",", "self", ")", ".", "update_xml_element", "(", ")", "if", "hasattr", "(", "self", ",", "'lang'", ")", ":", "self", ".", "xml_element", ".", "set", "(", "'{http://www.w3.org...
Updates the xml element contents to matches the instance contents. :returns: Updated XML element. :rtype: lxml.etree._Element
[ "Updates", "the", "xml", "element", "contents", "to", "matches", "the", "instance", "contents", "." ]
python
train
30.647059
awslabs/sockeye
sockeye/inference.py
https://github.com/awslabs/sockeye/blob/5d64a1ee1ef3cbba17c6d1d94bc061020c43f6ab/sockeye/inference.py#L1748-L1770
def _translate_nd(self, source: mx.nd.NDArray, source_length: int, restrict_lexicon: Optional[lexicon.TopKLexicon], raw_constraints: List[Optional[constrained.RawConstraintList]], raw_avoid_list: List[Optional[constrained.RawConstraintList]], max_output_lengths: mx.nd.NDArray) -> List[Translation]: """ Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations. """ return self._get_best_from_beam(*self._beam_search(source, source_length, restrict_lexicon, raw_constraints, raw_avoid_list, max_output_lengths))
[ "def", "_translate_nd", "(", "self", ",", "source", ":", "mx", ".", "nd", ".", "NDArray", ",", "source_length", ":", "int", ",", "restrict_lexicon", ":", "Optional", "[", "lexicon", ".", "TopKLexicon", "]", ",", "raw_constraints", ":", "List", "[", "Option...
Translates source of source_length, given a bucket_key. :param source: Source ids. Shape: (batch_size, bucket_key, num_factors). :param source_length: Bucket key. :param restrict_lexicon: Lexicon to use for vocabulary restriction. :param raw_constraints: A list of optional constraint lists. :return: Sequence of translations.
[ "Translates", "source", "of", "source_length", "given", "a", "bucket_key", "." ]
python
train
54.73913
brocade/pynos
pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_7/ver_7_1_0/yang/brocade_fabric_service.py#L797-L808
def show_fabric_trunk_info_input_rbridge_id(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") show_fabric_trunk_info = ET.Element("show_fabric_trunk_info") config = show_fabric_trunk_info input = ET.SubElement(show_fabric_trunk_info, "input") rbridge_id = ET.SubElement(input, "rbridge-id") rbridge_id.text = kwargs.pop('rbridge_id') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "show_fabric_trunk_info_input_rbridge_id", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "show_fabric_trunk_info", "=", "ET", ".", "Element", "(", "\"show_fabric_trunk_info\"", ")", "config", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
41.75
marcomusy/vtkplotter
vtkplotter/addons.py
https://github.com/marcomusy/vtkplotter/blob/692c3396782722ec525bc1346a26999868c650c6/vtkplotter/addons.py#L133-L245
def addScalarBar3D( obj=None, at=0, pos=(0, 0, 0), normal=(0, 0, 1), sx=0.1, sy=2, nlabels=9, ncols=256, cmap=None, c=None, alpha=1, ): """Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_ """ from vtk.util.numpy_support import vtk_to_numpy, numpy_to_vtk vp = settings.plotter_instance if c is None: # automatic black or white c = (0.8, 0.8, 0.8) if numpy.sum(colors.getColor(vp.backgrcol)) > 1.5: c = (0.2, 0.2, 0.2) c = colors.getColor(c) gap = 0.4 # space btw nrs and scale vtkscalars_name = "" if obj is None: obj = vp.lastActor() if isinstance(obj, vtk.vtkActor): poly = obj.GetMapper().GetInput() vtkscalars = poly.GetPointData().GetScalars() if vtkscalars is None: vtkscalars = poly.GetCellData().GetScalars() if vtkscalars is None: print("Error in addScalarBar3D: actor has no scalar array.", [obj]) exit() npscalars = vtk_to_numpy(vtkscalars) vmin, vmax = numpy.min(npscalars), numpy.max(npscalars) vtkscalars_name = vtkscalars.GetName().split("_")[-1] elif utils.isSequence(obj): vmin, vmax = numpy.min(obj), numpy.max(obj) vtkscalars_name = "jet" else: print("Error in addScalarBar3D(): input must be vtkActor or list.", type(obj)) exit() if cmap is None: cmap = vtkscalars_name # build the color scale part scale = shapes.Grid([-sx * gap, 0, 0], c=c, alpha=alpha, sx=sx, sy=sy, resx=1, resy=ncols) scale.GetProperty().SetRepresentationToSurface() cscals = scale.cellCenters()[:, 1] def _cellColors(scale, scalars, cmap, alpha): mapper = scale.GetMapper() cpoly = mapper.GetInput() n = len(scalars) lut = vtk.vtkLookupTable() lut.SetNumberOfTableValues(n) lut.Build() for i in range(n): r, g, b = colors.colorMap(i, cmap, 0, n) lut.SetTableValue(i, r, g, b, alpha) arr = numpy_to_vtk(numpy.ascontiguousarray(scalars), deep=True) 
vmin, vmax = numpy.min(scalars), numpy.max(scalars) mapper.SetScalarRange(vmin, vmax) mapper.SetLookupTable(lut) mapper.ScalarVisibilityOn() cpoly.GetCellData().SetScalars(arr) _cellColors(scale, cscals, cmap, alpha) # build text nlabels = numpy.min([nlabels, ncols]) tlabs = numpy.linspace(vmin, vmax, num=nlabels, endpoint=True) tacts = [] prec = (vmax - vmin) / abs(vmax + vmin) * 2 prec = int(3 + abs(numpy.log10(prec + 1))) for i, t in enumerate(tlabs): tx = utils.precision(t, prec) y = -sy / 1.98 + sy * i / (nlabels - 1) a = shapes.Text(tx, pos=[sx * gap, y, 0], s=sy / 50, c=c, alpha=alpha, depth=0) a.PickableOff() tacts.append(a) sact = Assembly([scale] + tacts) nax = numpy.linalg.norm(normal) if nax: normal = numpy.array(normal) / nax theta = numpy.arccos(normal[2]) phi = numpy.arctan2(normal[1], normal[0]) sact.RotateZ(phi * 57.3) sact.RotateY(theta * 57.3) sact.SetPosition(pos) if not vp.renderers[at]: save_int = vp.interactive vp.show(interactive=0) vp.interactive = save_int vp.renderers[at].AddActor(sact) vp.renderers[at].Render() sact.PickableOff() vp.scalarbars.append(sact) if isinstance(obj, Actor): obj.scalarbar_actor = sact return sact
[ "def", "addScalarBar3D", "(", "obj", "=", "None", ",", "at", "=", "0", ",", "pos", "=", "(", "0", ",", "0", ",", "0", ")", ",", "normal", "=", "(", "0", ",", "0", ",", "1", ")", ",", "sx", "=", "0.1", ",", "sy", "=", "2", ",", "nlabels", ...
Draw a 3D scalar bar. ``obj`` input can be: - a list of numbers, - a list of two numbers in the form `(min, max)`, - a ``vtkActor`` already containing a set of scalars associated to vertices or cells, - if ``None`` the last actor in the list of actors will be used. .. hint:: |scalbar| |mesh_coloring.py|_
[ "Draw", "a", "3D", "scalar", "bar", "." ]
python
train
32.646018
a1ezzz/wasp-general
wasp_general/task/dependency.py
https://github.com/a1ezzz/wasp-general/blob/1029839d33eb663f8dec76c1c46754d53c1de4a9/wasp_general/task/dependency.py#L323-L335
def registry_storage(cls): """ Get registry storage :return: WTaskDependencyRegistryStorage """ if cls.__registry_storage__ is None: raise ValueError('__registry_storage__ must be defined') if isinstance(cls.__registry_storage__, WTaskDependencyRegistryStorage) is False: raise TypeError( "Property '__registry_storage__' is invalid (must derived from WTaskRegistryBase)" ) return cls.__registry_storage__
[ "def", "registry_storage", "(", "cls", ")", ":", "if", "cls", ".", "__registry_storage__", "is", "None", ":", "raise", "ValueError", "(", "'__registry_storage__ must be defined'", ")", "if", "isinstance", "(", "cls", ".", "__registry_storage__", ",", "WTaskDependenc...
Get registry storage :return: WTaskDependencyRegistryStorage
[ "Get", "registry", "storage" ]
python
train
32.307692
balloob/pychromecast
pychromecast/controllers/multizone.py
https://github.com/balloob/pychromecast/blob/831b09c4fed185a7bffe0ea330b7849d5f4e36b6/pychromecast/controllers/multizone.py#L200-L247
def receive_message(self, message, data): # noqa: E501 pylint: disable=too-many-return-statements """ Called when a multizone message is received. """ if data[MESSAGE_TYPE] == TYPE_DEVICE_ADDED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_REMOVED: uuid = data['deviceId'] self._remove_member(uuid) return True if data[MESSAGE_TYPE] == TYPE_DEVICE_UPDATED: uuid = data['device']['deviceId'] name = data['device']['name'] self._add_member(uuid, name) return True if data[MESSAGE_TYPE] == TYPE_MULTIZONE_STATUS: members = data['status']['devices'] members = \ {member['deviceId']: member['name'] for member in members} removed_members = \ list(set(self._members.keys())-set(members.keys())) added_members = list(set(members.keys())-set(self._members.keys())) _LOGGER.debug("(%s) Added members %s, Removed members: %s", self._uuid, added_members, removed_members) for uuid in removed_members: self._remove_member(uuid) for uuid in added_members: self._add_member(uuid, members[uuid]) for listener in list(self._status_listeners): listener.multizone_status_received() return True if data[MESSAGE_TYPE] == TYPE_SESSION_UPDATED: # A temporary group has been formed return True if data[MESSAGE_TYPE] == TYPE_CASTING_GROUPS: # Answer to GET_CASTING_GROUPS return True return False
[ "def", "receive_message", "(", "self", ",", "message", ",", "data", ")", ":", "# noqa: E501 pylint: disable=too-many-return-statements", "if", "data", "[", "MESSAGE_TYPE", "]", "==", "TYPE_DEVICE_ADDED", ":", "uuid", "=", "data", "[", "'device'", "]", "[", "'devic...
Called when a multizone message is received.
[ "Called", "when", "a", "multizone", "message", "is", "received", "." ]
python
train
37.291667
inspirehep/inspire-utils
inspire_utils/logging.py
https://github.com/inspirehep/inspire-utils/blob/b0b5983c58700735dfde75e4c8bd32834f2473d4/inspire_utils/logging.py#L36-L42
def error(self, message, *args, **kwargs): """Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry. """ kwargs.setdefault('extra', {}).setdefault('stack', True) return self.logger.error(message, *args, **kwargs)
[ "def", "error", "(", "self", ",", "message", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'extra'", ",", "{", "}", ")", ".", "setdefault", "(", "'stack'", ",", "True", ")", "return", "self", ".", "logger",...
Log error with stack trace and locals information. By default, enables stack trace information in logging messages, so that stacktrace and locals appear in Sentry.
[ "Log", "error", "with", "stack", "trace", "and", "locals", "information", "." ]
python
train
50.857143
ella/ella
ella/photos/formatter.py
https://github.com/ella/ella/blob/4a1414991f649dc21c4b777dc6b41a922a13faa7/ella/photos/formatter.py#L180-L189
def resize(self): """ Get target size for a cropped image and do the resizing if we got anything usable. """ resized_size = self.get_resized_size() if not resized_size: return self.image = self.image.resize(resized_size, Image.ANTIALIAS)
[ "def", "resize", "(", "self", ")", ":", "resized_size", "=", "self", ".", "get_resized_size", "(", ")", "if", "not", "resized_size", ":", "return", "self", ".", "image", "=", "self", ".", "image", ".", "resize", "(", "resized_size", ",", "Image", ".", ...
Get target size for a cropped image and do the resizing if we got anything usable.
[ "Get", "target", "size", "for", "a", "cropped", "image", "and", "do", "the", "resizing", "if", "we", "got", "anything", "usable", "." ]
python
train
29.7
ironfroggy/django-better-cache
bettercache/utils.py
https://github.com/ironfroggy/django-better-cache/blob/5350e8c646cef1c1ca74eab176f856ddd9eaf5c3/bettercache/utils.py#L198-L205
def strip_wsgi(request): """Strip WSGI data out of the request META data.""" meta = copy(request.META) for key in meta: if key[:4] == 'wsgi': meta[key] = None return meta
[ "def", "strip_wsgi", "(", "request", ")", ":", "meta", "=", "copy", "(", "request", ".", "META", ")", "for", "key", "in", "meta", ":", "if", "key", "[", ":", "4", "]", "==", "'wsgi'", ":", "meta", "[", "key", "]", "=", "None", "return", "meta" ]
Strip WSGI data out of the request META data.
[ "Strip", "WSGI", "data", "out", "of", "the", "request", "META", "data", "." ]
python
train
25
jssimporter/python-jss
jss/jssobject.py
https://github.com/jssimporter/python-jss/blob/b95185d74e0c0531b0b563f280d4129e21d5fe5d/jss/jssobject.py#L369-L389
def _handle_location(self, location): """Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None. """ if not isinstance(location, ElementTree.Element): element = self.find(location) if element is None: raise ValueError("Invalid path!") else: element = location return element
[ "def", "_handle_location", "(", "self", ",", "location", ")", ":", "if", "not", "isinstance", "(", "location", ",", "ElementTree", ".", "Element", ")", ":", "element", "=", "self", ".", "find", "(", "location", ")", "if", "element", "is", "None", ":", ...
Return an element located at location with flexible args. Args: location: String xpath to use in an Element.find search OR an Element (which is simply returned). Returns: The found Element. Raises: ValueError if the location is a string that results in a find of None.
[ "Return", "an", "element", "located", "at", "location", "with", "flexible", "args", "." ]
python
train
30.952381
PythonicNinja/pydrill
pydrill/connection/base.py
https://github.com/PythonicNinja/pydrill/blob/0713e78c84d44cd438018e4ba1588a8e242f78c4/pydrill/connection/base.py#L85-L97
def log_request_fail(self, method, full_url, body, duration, status_code=None, exception=None): """ Log an unsuccessful API call. """ logger.warning( '%s %s [status:%s request:%.3fs]', method, full_url, status_code or 'N/A', duration, exc_info=exception is not None ) if body and not isinstance(body, dict): body = body.decode('utf-8') logger.debug('> %s', body)
[ "def", "log_request_fail", "(", "self", ",", "method", ",", "full_url", ",", "body", ",", "duration", ",", "status_code", "=", "None", ",", "exception", "=", "None", ")", ":", "logger", ".", "warning", "(", "'%s %s [status:%s request:%.3fs]'", ",", "method", ...
Log an unsuccessful API call.
[ "Log", "an", "unsuccessful", "API", "call", "." ]
python
train
34.153846
dead-beef/markovchain
markovchain/text/util.py
https://github.com/dead-beef/markovchain/blob/9bd10b2f01089341c4a875a0fa569d50caba22c7/markovchain/text/util.py#L198-L220
def re_flags_str(flags, custom_flags): """Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string. """ res = '' for flag in RE_FLAGS: if flags & getattr(re, flag): res += flag for flag in RE_CUSTOM_FLAGS: if custom_flags & getattr(ReFlags, flag): res += flag return res
[ "def", "re_flags_str", "(", "flags", ",", "custom_flags", ")", ":", "res", "=", "''", "for", "flag", "in", "RE_FLAGS", ":", "if", "flags", "&", "getattr", "(", "re", ",", "flag", ")", ":", "res", "+=", "flag", "for", "flag", "in", "RE_CUSTOM_FLAGS", ...
Convert regexp flags to string. Parameters ---------- flags : `int` Flags. custom_flags : `int` Custom flags. Returns ------- `str` Flag string.
[ "Convert", "regexp", "flags", "to", "string", "." ]
python
train
19.695652
tensorflow/tensor2tensor
tensor2tensor/data_generators/librispeech.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/data_generators/librispeech.py#L63-L85
def _collect_data(directory, input_ext, transcription_ext): """Traverses directory collecting input and target files.""" # Directory from string to tuple pair of strings # key: the filepath to a datafile including the datafile's basename. Example, # if the datafile was "/path/to/datafile.wav" then the key would be # "/path/to/datafile" # value: a pair of strings (media_filepath, label) data_files = {} for root, _, filenames in os.walk(directory): transcripts = [filename for filename in filenames if transcription_ext in filename] for transcript in transcripts: transcript_path = os.path.join(root, transcript) with open(transcript_path, "r") as transcript_file: for transcript_line in transcript_file: line_contents = transcript_line.strip().split(" ", 1) media_base, label = line_contents key = os.path.join(root, media_base) assert key not in data_files media_name = "%s.%s"%(media_base, input_ext) media_path = os.path.join(root, media_name) data_files[key] = (media_base, media_path, label) return data_files
[ "def", "_collect_data", "(", "directory", ",", "input_ext", ",", "transcription_ext", ")", ":", "# Directory from string to tuple pair of strings", "# key: the filepath to a datafile including the datafile's basename. Example,", "# if the datafile was \"/path/to/datafile.wav\" then the key...
Traverses directory collecting input and target files.
[ "Traverses", "directory", "collecting", "input", "and", "target", "files", "." ]
python
train
49.304348
robinandeer/puzzle
puzzle/models/variant.py
https://github.com/robinandeer/puzzle/blob/9476f05b416d3a5135d25492cb31411fdf831c58/puzzle/models/variant.py#L159-L170
def add_compound(self, compound): """Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary """ logger.debug("Adding compound {0} to variant {1}".format( compound, self['variant_id'])) self['compounds'].append(compound)
[ "def", "add_compound", "(", "self", ",", "compound", ")", ":", "logger", ".", "debug", "(", "\"Adding compound {0} to variant {1}\"", ".", "format", "(", "compound", ",", "self", "[", "'variant_id'", "]", ")", ")", "self", "[", "'compounds'", "]", ".", "appe...
Add the information of a compound variant This adds a compound dict to variant['compounds'] Args: compound (dict): A compound dictionary
[ "Add", "the", "information", "of", "a", "compound", "variant" ]
python
train
31.416667
priestc/moneywagon
moneywagon/__init__.py
https://github.com/priestc/moneywagon/blob/00518f1f557dcca8b3031f46d3564c2baa0227a3/moneywagon/__init__.py#L637-L661
def service_table(format='simple', authenticated=False): """ Returns a string depicting all services currently installed. """ if authenticated: all_services = ExchangeUniverse.get_authenticated_services() else: all_services = ALL_SERVICES if format == 'html': linkify = lambda x: "<a href='{0}' target='_blank'>{0}</a>".format(x) else: linkify = lambda x: x ret = [] for service in sorted(all_services, key=lambda x: x.service_id): ret.append([ service.service_id, service.__name__, linkify(service.api_homepage.format( domain=service.domain, protocol=service.protocol )), ", ".join(service.supported_cryptos or []) ]) return tabulate(ret, headers=['ID', 'Name', 'URL', 'Supported Currencies'], tablefmt=format)
[ "def", "service_table", "(", "format", "=", "'simple'", ",", "authenticated", "=", "False", ")", ":", "if", "authenticated", ":", "all_services", "=", "ExchangeUniverse", ".", "get_authenticated_services", "(", ")", "else", ":", "all_services", "=", "ALL_SERVICES"...
Returns a string depicting all services currently installed.
[ "Returns", "a", "string", "depicting", "all", "services", "currently", "installed", "." ]
python
train
33.68
UCL-INGI/INGInious
inginious/frontend/pages/course.py
https://github.com/UCL-INGI/INGInious/blob/cbda9a9c7f2b8e8eb1e6d7d51f0d18092086300c/inginious/frontend/pages/course.py#L24-L33
def POST(self, courseid): # pylint: disable=arguments-differ """ POST request """ course = self.get_course(courseid) user_input = web.input() if "unregister" in user_input and course.allow_unregister(): self.user_manager.course_unregister_user(course, self.user_manager.session_username()) raise web.seeother(self.app.get_homepath() + '/mycourses') return self.show_page(course)
[ "def", "POST", "(", "self", ",", "courseid", ")", ":", "# pylint: disable=arguments-differ", "course", "=", "self", ".", "get_course", "(", "courseid", ")", "user_input", "=", "web", ".", "input", "(", ")", "if", "\"unregister\"", "in", "user_input", "and", ...
POST request
[ "POST", "request" ]
python
train
43.6
chrisjsewell/jsonextended
jsonextended/edict.py
https://github.com/chrisjsewell/jsonextended/blob/c3a7a880cc09789b3c61204265dcbb127be76c8a/jsonextended/edict.py#L66-L73
def is_list_of_dict_like(obj, attr=('keys', 'items')): """test if object is a list only containing dict like items """ try: if len(obj) == 0: return False return all([is_dict_like(i, attr) for i in obj]) except Exception: return False
[ "def", "is_list_of_dict_like", "(", "obj", ",", "attr", "=", "(", "'keys'", ",", "'items'", ")", ")", ":", "try", ":", "if", "len", "(", "obj", ")", "==", "0", ":", "return", "False", "return", "all", "(", "[", "is_dict_like", "(", "i", ",", "attr"...
test if object is a list only containing dict like items
[ "test", "if", "object", "is", "a", "list", "only", "containing", "dict", "like", "items" ]
python
train
34.375
BlueBrain/NeuroM
neurom/geom/transform.py
https://github.com/BlueBrain/NeuroM/blob/254bb73535b20053d175bc4725bade662177d12b/neurom/geom/transform.py#L128-L146
def rotate(obj, axis, angle, origin=None): ''' Rotation around unit vector following the right hand rule Parameters: obj : obj to be rotated (e.g. neurite, neuron). Must implement a transform method. axis : unit vector for the axis of rotation angle : rotation angle in rads Returns: A copy of the object with the applied translation. ''' R = _rodrigues_to_dcm(axis, angle) try: return obj.transform(PivotRotation(R, origin)) except AttributeError: raise NotImplementedError
[ "def", "rotate", "(", "obj", ",", "axis", ",", "angle", ",", "origin", "=", "None", ")", ":", "R", "=", "_rodrigues_to_dcm", "(", "axis", ",", "angle", ")", "try", ":", "return", "obj", ".", "transform", "(", "PivotRotation", "(", "R", ",", "origin",...
Rotation around unit vector following the right hand rule Parameters: obj : obj to be rotated (e.g. neurite, neuron). Must implement a transform method. axis : unit vector for the axis of rotation angle : rotation angle in rads Returns: A copy of the object with the applied translation.
[ "Rotation", "around", "unit", "vector", "following", "the", "right", "hand", "rule" ]
python
train
29
openid/JWTConnect-Python-CryptoJWT
src/cryptojwt/key_jar.py
https://github.com/openid/JWTConnect-Python-CryptoJWT/blob/8863cfbfe77ca885084870b234a66b55bd52930c/src/cryptojwt/key_jar.py#L148-L230
def get(self, key_use, key_type="", owner="", kid=None, **kwargs): """ Get all keys that matches a set of search criteria :param key_use: A key useful for this usage (enc, dec, sig, ver) :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :return: A possibly empty list of keys """ if key_use in ["dec", "enc"]: use = "enc" else: use = "sig" _kj = None if owner != "": try: _kj = self.issuer_keys[owner] except KeyError: if owner.endswith("/"): try: _kj = self.issuer_keys[owner[:-1]] except KeyError: pass else: try: _kj = self.issuer_keys[owner + "/"] except KeyError: pass else: try: _kj = self.issuer_keys[owner] except KeyError: pass if _kj is None: return [] lst = [] for bundle in _kj: if key_type: if key_use in ['ver', 'dec']: _bkeys = bundle.get(key_type, only_active=False) else: _bkeys = bundle.get(key_type) else: _bkeys = bundle.keys() for key in _bkeys: if key.inactive_since and key_use != "sig": # Skip inactive keys unless for signature verification continue if not key.use or use == key.use: if kid: if key.kid == kid: lst.append(key) break else: continue else: lst.append(key) # if elliptic curve, have to check if I have a key of the right curve if key_type == "EC" and "alg" in kwargs: name = "P-{}".format(kwargs["alg"][2:]) # the type _lst = [] for key in lst: if name != key.crv: continue _lst.append(key) lst = _lst if use == 'enc' and key_type == 'oct' and owner != '': # Add my symmetric keys for kb in self.issuer_keys['']: for key in kb.get(key_type): if key.inactive_since: continue if not key.use or key.use == use: lst.append(key) return lst
[ "def", "get", "(", "self", ",", "key_use", ",", "key_type", "=", "\"\"", ",", "owner", "=", "\"\"", ",", "kid", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "key_use", "in", "[", "\"dec\"", ",", "\"enc\"", "]", ":", "use", "=", "\"enc\""...
Get all keys that matches a set of search criteria :param key_use: A key useful for this usage (enc, dec, sig, ver) :param key_type: Type of key (rsa, ec, oct, ..) :param owner: Who is the owner of the keys, "" == me (default) :param kid: A Key Identifier :return: A possibly empty list of keys
[ "Get", "all", "keys", "that", "matches", "a", "set", "of", "search", "criteria" ]
python
train
32.638554
gitpython-developers/GitPython
git/index/base.py
https://github.com/gitpython-developers/GitPython/blob/1f66e25c25cde2423917ee18c4704fff83b837d1/git/index/base.py#L557-L566
def _to_relative_path(self, path): """:return: Version of path relative to our git directory or raise ValueError if it is not within our git direcotory""" if not osp.isabs(path): return path if self.repo.bare: raise InvalidGitRepositoryError("require non-bare repository") if not path.startswith(self.repo.working_tree_dir): raise ValueError("Absolute path %r is not in git repository at %r" % (path, self.repo.working_tree_dir)) return os.path.relpath(path, self.repo.working_tree_dir)
[ "def", "_to_relative_path", "(", "self", ",", "path", ")", ":", "if", "not", "osp", ".", "isabs", "(", "path", ")", ":", "return", "path", "if", "self", ".", "repo", ".", "bare", ":", "raise", "InvalidGitRepositoryError", "(", "\"require non-bare repository\...
:return: Version of path relative to our git directory or raise ValueError if it is not within our git direcotory
[ ":", "return", ":", "Version", "of", "path", "relative", "to", "our", "git", "directory", "or", "raise", "ValueError", "if", "it", "is", "not", "within", "our", "git", "direcotory" ]
python
train
56.1
ARMmbed/icetea
icetea_lib/ResourceProvider/ResourceConfig.py
https://github.com/ARMmbed/icetea/blob/b2b97ac607429830cf7d62dae2e3903692c7c778/icetea_lib/ResourceProvider/ResourceConfig.py#L41-L47
def _hardware_count(self): """ Amount of hardware resources. :return: integer """ return self._counts.get("hardware") + self._counts.get("serial") + self._counts.get("mbed")
[ "def", "_hardware_count", "(", "self", ")", ":", "return", "self", ".", "_counts", ".", "get", "(", "\"hardware\"", ")", "+", "self", ".", "_counts", ".", "get", "(", "\"serial\"", ")", "+", "self", ".", "_counts", ".", "get", "(", "\"mbed\"", ")" ]
Amount of hardware resources. :return: integer
[ "Amount", "of", "hardware", "resources", "." ]
python
train
29.714286
googleads/googleads-python-lib
examples/adwords/v201809/advanced_operations/add_dynamic_page_feed.py
https://github.com/googleads/googleads-python-lib/blob/aa3b1b474b0f9789ca55ca46f4b2b57aeae38874/examples/adwords/v201809/advanced_operations/add_dynamic_page_feed.py#L98-L131
def _CreateFeedMapping(client, feed_details): """Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance. """ # Get the FeedMappingService. feed_mapping_service = client.GetService('FeedMappingService', version='v201809') # Create the operation. operation = { # Create the feed mapping. 'operand': { 'criterionType': DSA_PAGE_FEED_CRITERION_TYPE, 'feedId': feed_details.feed_id, # Map the feedAttributeIds to the fieldId constants. 'attributeFieldMappings': [ { 'feedAttributeId': feed_details.url_attribute_id, 'fieldId': DSA_PAGE_URLS_FIELD_ID }, { 'feedAttributeId': feed_details.label_attribute_id, 'fieldId': DSA_LABEL_FIELD_ID } ] }, 'operator': 'ADD' } # Add the feed mapping. feed_mapping_service.mutate([operation])
[ "def", "_CreateFeedMapping", "(", "client", ",", "feed_details", ")", ":", "# Get the FeedMappingService.", "feed_mapping_service", "=", "client", ".", "GetService", "(", "'FeedMappingService'", ",", "version", "=", "'v201809'", ")", "# Create the operation.", "operation"...
Creates the feed mapping for DSA page feeds. Args: client: an AdWordsClient instance. feed_details: a _DSAFeedDetails instance.
[ "Creates", "the", "feed", "mapping", "for", "DSA", "page", "feeds", "." ]
python
train
30.529412
sporteasy/python-poeditor
poeditor/client.py
https://github.com/sporteasy/python-poeditor/blob/e9c0a8ab08816903122f730b73ffaab46601076c/poeditor/client.py#L723-L732
def list_contributors(self, project_id=None, language_code=None): """ Returns the list of contributors """ data = self._run( url_path="contributors/list", id=project_id, language=language_code ) return data['result'].get('contributors', [])
[ "def", "list_contributors", "(", "self", ",", "project_id", "=", "None", ",", "language_code", "=", "None", ")", ":", "data", "=", "self", ".", "_run", "(", "url_path", "=", "\"contributors/list\"", ",", "id", "=", "project_id", ",", "language", "=", "lang...
Returns the list of contributors
[ "Returns", "the", "list", "of", "contributors" ]
python
train
31.5
google/apitools
apitools/base/py/transfer.py
https://github.com/google/apitools/blob/f3745a7ea535aa0e88b0650c16479b696d6fd446/apitools/base/py/transfer.py#L299-L332
def InitializeDownload(self, http_request, http=None, client=None): """Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead. """ self.EnsureUninitialized() if http is None and client is None: raise exceptions.UserError('Must provide client or http.') http = http or client.http if client is not None: http_request.url = client.FinalizeTransferUrl(http_request.url) url = http_request.url if self.auto_transfer: end_byte = self.__ComputeEndByte(0) self.__SetRangeHeader(http_request, 0, end_byte) response = http_wrapper.MakeRequest( self.bytes_http or http, http_request) if response.status_code not in self._ACCEPTABLE_STATUSES: raise exceptions.HttpError.FromResponse(response) self.__initial_response = response self.__SetTotal(response.info) url = response.info.get('content-location', response.request_url) if client is not None: url = client.FinalizeTransferUrl(url) self._Initialize(http, url) # Unless the user has requested otherwise, we want to just # go ahead and pump the bytes now. if self.auto_transfer: self.StreamInChunks()
[ "def", "InitializeDownload", "(", "self", ",", "http_request", ",", "http", "=", "None", ",", "client", "=", "None", ")", ":", "self", ".", "EnsureUninitialized", "(", ")", "if", "http", "is", "None", "and", "client", "is", "None", ":", "raise", "excepti...
Initialize this download by making a request. Args: http_request: The HttpRequest to use to initialize this download. http: The httplib2.Http instance for this request. client: If provided, let this client process the final URL before sending any additional requests. If client is provided and http is not, client.http will be used instead.
[ "Initialize", "this", "download", "by", "making", "a", "request", "." ]
python
train
47.882353
Robpol86/libnl
libnl/genl/mngt.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/genl/mngt.py#L151-L163
def lookup_family_by_name(name): """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106. Positional arguments: name -- string. Returns: genl_ops class instance or None. """ for ops in nl_list_for_each_entry(genl_ops(), genl_ops_list, 'o_list'): if ops.o_name == name: return ops return None
[ "def", "lookup_family_by_name", "(", "name", ")", ":", "for", "ops", "in", "nl_list_for_each_entry", "(", "genl_ops", "(", ")", ",", "genl_ops_list", ",", "'o_list'", ")", ":", "if", "ops", ".", "o_name", "==", "name", ":", "return", "ops", "return", "None...
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/mngt.c#L106. Positional arguments: name -- string. Returns: genl_ops class instance or None.
[ "https", ":", "//", "github", ".", "com", "/", "thom311", "/", "libnl", "/", "blob", "/", "libnl3_2_25", "/", "lib", "/", "genl", "/", "mngt", ".", "c#L106", "." ]
python
train
27
ioos/compliance-checker
compliance_checker/cfutil.py
https://github.com/ioos/compliance-checker/blob/ee89c27b0daade58812489a2da3aa3b6859eafd9/compliance_checker/cfutil.py#L152-L163
def is_unitless(ds, variable): ''' Returns true if the variable is unitless Note units of '1' are considered whole numbers or parts but still represent physical units and not the absence of units. :param netCDF4.Dataset ds: An open netCDF dataset :param str variable: Name of the variable ''' units = getattr(ds.variables[variable], 'units', None) return units is None or units == ''
[ "def", "is_unitless", "(", "ds", ",", "variable", ")", ":", "units", "=", "getattr", "(", "ds", ".", "variables", "[", "variable", "]", ",", "'units'", ",", "None", ")", "return", "units", "is", "None", "or", "units", "==", "''" ]
Returns true if the variable is unitless Note units of '1' are considered whole numbers or parts but still represent physical units and not the absence of units. :param netCDF4.Dataset ds: An open netCDF dataset :param str variable: Name of the variable
[ "Returns", "true", "if", "the", "variable", "is", "unitless" ]
python
train
34.166667
rasguanabana/ytfs
ytfs/actions.py
https://github.com/rasguanabana/ytfs/blob/67dd9536a1faea09c8394f697529124f78e77cfa/ytfs/actions.py#L113-L141
def __getChannelId(self): """ Obtain channel id for channel name, if present in ``self.search_params``. """ if not self.search_params.get("channelId"): return api_fixed_url = "https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "forUsername": self.search_params["channelId"]}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id'] return # got it except IndexError: pass # try searching now... api_fixed_url = "https://www.googleapis.com/youtube/v3/search?part=snippet&type=channel&fields=items%2Fid&" url = api_fixed_url + urlencode({"key": self.api_key, "q": self.search_params['channelId']}) get = requests.get(url).json() try: self.search_params["channelId"] = get['items'][0]['id']['channelId'] except IndexError: del self.search_params["channelId"]
[ "def", "__getChannelId", "(", "self", ")", ":", "if", "not", "self", ".", "search_params", ".", "get", "(", "\"channelId\"", ")", ":", "return", "api_fixed_url", "=", "\"https://www.googleapis.com/youtube/v3/channels?part=id&maxResults=1&fields=items%2Fid&\"", "url", "=",...
Obtain channel id for channel name, if present in ``self.search_params``.
[ "Obtain", "channel", "id", "for", "channel", "name", "if", "present", "in", "self", ".", "search_params", "." ]
python
train
36.068966
gmr/tinman
tinman/handlers/rabbitmq.py
https://github.com/gmr/tinman/blob/98f0acd15a228d752caa1864cdf02aaa3d492a9f/tinman/handlers/rabbitmq.py#L59-L69
def _add_to_publish_stack(self, exchange, routing_key, message, properties): """Temporarily add the message to the stack to publish to RabbitMQ :param str exchange: The exchange to publish to :param str routing_key: The routing key to publish with :param str message: The message body :param pika.BasicProperties: The message properties """ global message_stack message_stack.append((exchange, routing_key, message, properties))
[ "def", "_add_to_publish_stack", "(", "self", ",", "exchange", ",", "routing_key", ",", "message", ",", "properties", ")", ":", "global", "message_stack", "message_stack", ".", "append", "(", "(", "exchange", ",", "routing_key", ",", "message", ",", "properties",...
Temporarily add the message to the stack to publish to RabbitMQ :param str exchange: The exchange to publish to :param str routing_key: The routing key to publish with :param str message: The message body :param pika.BasicProperties: The message properties
[ "Temporarily", "add", "the", "message", "to", "the", "stack", "to", "publish", "to", "RabbitMQ" ]
python
train
44
kgori/treeCl
treeCl/plotter.py
https://github.com/kgori/treeCl/blob/fed624b3db1c19cc07175ca04e3eda6905a8d305/treeCl/plotter.py#L151-L230
def plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): """ plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): Plot a 2D / 3D scatterplot of coordinates, optionally coloured by group membership. Args: coordinates: numpy array or treeCl.CoordinateMatrix - The coordinates of the points to plot. The number of columns determines the number of dimensions in the plot. add_sphere: bool - Add a wireframe sphere to a 3D plot. Spectral clustering places points on the surface of a unit sphere. colours: list of rgb hexes, or 'auto', or None - Colours to use to colour the points, as a list of RGB hex values. If None, defaults (colorbrewer set3). If 'auto', generates a set of colours equally spaced from the colour wheel. labels: Tuple(xlab, ylab, title, zlab) - Plot labels. Must be given in the above order. Missing options will be replaced by None. E.g. 
to set the title: (None, None, "Some points") outfile: str - Save figure to this filename """ if isinstance(coordinates, CoordinateMatrix): coordinates = coordinates.values dimensions = min(3, coordinates.shape[1]) partition = (partition or Partition(tuple([0] * len(coordinates)))) ngrp = partition.num_groups() if colours is None: colours = SET2 elif colours == 'auto': colours = ggColorSlice(ngrp) colour_cycle = itertools.cycle(colours) colours = np.array([hex2color(c) for c in itertools.islice(colour_cycle, ngrp)]) if labels is None: xlab, ylab, zlab, title = None, None, None, None else: if isinstance(labels, (tuple, list)): labels = list(labels[:4]) labels.extend([None]*(4-len(labels))) xlab, ylab, title, zlab = labels fig = plt.figure() if dimensions == 3: ax = fig.add_subplot(111, projection='3d') if add_sphere: ax = _add_sphere(ax) else: ax = fig.add_subplot(111) members = partition.get_membership() for grp in range(ngrp): index = np.array(members[grp]) points = coordinates[index,:dimensions].T ax.scatter(*points, s=point_size, c=colours[grp], edgecolor=None, label='Group {}'.format(grp+1), **kwargs) if xlab: ax.set_xlabel(xlab) if ylab: ax.set_ylabel(ylab) if zlab: ax.set_zlabel(zlab) if title: ax.set_title(title) if legend: plt.legend() if outfile: fig.savefig('{0}.pdf'.format(outfile)) return fig
[ "def", "plot_embedding", "(", "coordinates", ",", "partition", "=", "None", ",", "add_sphere", "=", "False", ",", "point_size", "=", "8", ",", "colours", "=", "None", ",", "labels", "=", "None", ",", "legend", "=", "True", ",", "outfile", "=", "False", ...
plot_embedding(coordinates, partition=None, add_sphere=False, point_size=8, colours=None, labels=None, legend=True, outfile=False, **kwargs): Plot a 2D / 3D scatterplot of coordinates, optionally coloured by group membership. Args: coordinates: numpy array or treeCl.CoordinateMatrix - The coordinates of the points to plot. The number of columns determines the number of dimensions in the plot. add_sphere: bool - Add a wireframe sphere to a 3D plot. Spectral clustering places points on the surface of a unit sphere. colours: list of rgb hexes, or 'auto', or None - Colours to use to colour the points, as a list of RGB hex values. If None, defaults (colorbrewer set3). If 'auto', generates a set of colours equally spaced from the colour wheel. labels: Tuple(xlab, ylab, title, zlab) - Plot labels. Must be given in the above order. Missing options will be replaced by None. E.g. to set the title: (None, None, "Some points") outfile: str - Save figure to this filename
[ "plot_embedding", "(", "coordinates", "partition", "=", "None", "add_sphere", "=", "False", "point_size", "=", "8", "colours", "=", "None", "labels", "=", "None", "legend", "=", "True", "outfile", "=", "False", "**", "kwargs", ")", ":", "Plot", "a", "2D", ...
python
train
35.175
agoragames/haigha
haigha/connections/rabbit_connection.py
https://github.com/agoragames/haigha/blob/7b004e1c0316ec14b94fec1c54554654c38b1a25/haigha/connections/rabbit_connection.py#L219-L225
def nack(self, delivery_tag, multiple=False, requeue=False): '''Send a nack to the broker.''' args = Writer() args.write_longlong(delivery_tag).\ write_bits(multiple, requeue) self.send_frame(MethodFrame(self.channel_id, 60, 120, args))
[ "def", "nack", "(", "self", ",", "delivery_tag", ",", "multiple", "=", "False", ",", "requeue", "=", "False", ")", ":", "args", "=", "Writer", "(", ")", "args", ".", "write_longlong", "(", "delivery_tag", ")", ".", "write_bits", "(", "multiple", ",", "...
Send a nack to the broker.
[ "Send", "a", "nack", "to", "the", "broker", "." ]
python
train
39.285714
jidn/flask-resteasy
flask_resteasy.py
https://github.com/jidn/flask-resteasy/blob/79d4a50705d4300c91ab3bafefac23782768be2a/flask_resteasy.py#L104-L124
def init_app(self, app): """Initialize actions with the app or blueprint. :param app: the Flask application or blueprint object :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint` Examples:: api = Api() api.add_resource(...) api.init_app(blueprint) """ try: # Assume this is a blueprint and defer initialization if app._got_registered_once is True: raise ValueError("""Blueprint is already registered with an app.""") app.record(self._deferred_blueprint_init) except AttributeError: self._init_app(app) else: self.blueprint = app
[ "def", "init_app", "(", "self", ",", "app", ")", ":", "try", ":", "# Assume this is a blueprint and defer initialization", "if", "app", ".", "_got_registered_once", "is", "True", ":", "raise", "ValueError", "(", "\"\"\"Blueprint is already registered with an app.\"\"\"", ...
Initialize actions with the app or blueprint. :param app: the Flask application or blueprint object :type app: :class:`~flask.Flask` or :class:`~flask.Blueprint` Examples:: api = Api() api.add_resource(...) api.init_app(blueprint)
[ "Initialize", "actions", "with", "the", "app", "or", "blueprint", "." ]
python
train
33.238095
twitterdev/tweet_parser
tweet_parser/tweet.py
https://github.com/twitterdev/tweet_parser/blob/3435de8367d36b483a6cfd8d46cc28694ee8a42e/tweet_parser/tweet.py#L530-L552
def retweeted_tweet(self): """ The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed """ retweet = tweet_embeds.get_retweeted_tweet(self) if retweet is not None: try: return Tweet(retweet) except NotATweetError as nate: raise(NotATweetError("The retweet payload appears malformed." + " Failed with '{}'".format(nate))) else: return None
[ "def", "retweeted_tweet", "(", "self", ")", ":", "retweet", "=", "tweet_embeds", ".", "get_retweeted_tweet", "(", "self", ")", "if", "retweet", "is", "not", "None", ":", "try", ":", "return", "Tweet", "(", "retweet", ")", "except", "NotATweetError", "as", ...
The retweeted Tweet as a Tweet object If the Tweet is not a Retweet, return None If the Retweet payload cannot be loaded as a Tweet, this will raise a `NotATweetError` Returns: Tweet: A Tweet representing the retweeted status (or None) (see tweet_embeds.get_retweet, this is that value as a Tweet) Raises: NotATweetError: if retweeted tweet is malformed
[ "The", "retweeted", "Tweet", "as", "a", "Tweet", "object", "If", "the", "Tweet", "is", "not", "a", "Retweet", "return", "None", "If", "the", "Retweet", "payload", "cannot", "be", "loaded", "as", "a", "Tweet", "this", "will", "raise", "a", "NotATweetError" ...
python
train
36.73913
merll/docker-map
dockermap/map/input.py
https://github.com/merll/docker-map/blob/e14fe86a6ff5c33d121eb2f9157e9359cb80dd02/dockermap/map/input.py#L365-L384
def get_healthcheck(value): """ Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck """ if isinstance(value, HealthCheck): return value elif isinstance(value, six.string_types + (lazy_type,)) or uses_type_registry(value): return HealthCheck(value) elif isinstance(value, (tuple, list)): return HealthCheck(*value) elif isinstance(value, dict): return HealthCheck(**value) raise ValueError( "Invalid type; expected a list, tuple, dict, or string type, found {0}.".format(type(value).__name__))
[ "def", "get_healthcheck", "(", "value", ")", ":", "if", "isinstance", "(", "value", ",", "HealthCheck", ")", ":", "return", "value", "elif", "isinstance", "(", "value", ",", "six", ".", "string_types", "+", "(", "lazy_type", ",", ")", ")", "or", "uses_ty...
Converts input into a :class:`HealthCheck` tuple. Input can be passed as string, tuple, list, or a dictionary. If set to ``None``, the health check will be set to ``NONE``, i.e. override an existing configuration from the image. :param value: Health check input. :type value: unicode | str | tuple | list | NoneType :return: HealthCheck tuple :rtype: HealthCheck
[ "Converts", "input", "into", "a", ":", "class", ":", "HealthCheck", "tuple", ".", "Input", "can", "be", "passed", "as", "string", "tuple", "list", "or", "a", "dictionary", ".", "If", "set", "to", "None", "the", "health", "check", "will", "be", "set", "...
python
train
43.85
Azure/azure-sdk-for-python
azure-servicebus/azure/servicebus/common/mixins.py
https://github.com/Azure/azure-sdk-for-python/blob/d7306fde32f60a293a7567678692bdad31e4b667/azure-servicebus/azure/servicebus/common/mixins.py#L174-L222
def create_subscription( self, topic_name, subscription_name, lock_duration=30, requires_session=None, default_message_time_to_live=None, dead_lettering_on_message_expiration=None, dead_lettering_on_filter_evaluation_exceptions=None, enable_batched_operations=None, max_delivery_count=None): """Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists. 
""" sub_properties = Subscription( lock_duration="PT{}S".format(int(lock_duration)), requires_session=requires_session, default_message_time_to_live=default_message_time_to_live, dead_lettering_on_message_expiration=dead_lettering_on_message_expiration, dead_lettering_on_filter_evaluation_exceptions=dead_lettering_on_filter_evaluation_exceptions, max_delivery_count=max_delivery_count, enable_batched_operations=enable_batched_operations) try: return self.mgmt_client.create_subscription( topic_name, subscription_name, subscription=sub_properties, fail_on_exist=True) except requests.exceptions.ConnectionError as e: raise ServiceBusConnectionError("Namespace: {} not found".format(self.service_namespace), e)
[ "def", "create_subscription", "(", "self", ",", "topic_name", ",", "subscription_name", ",", "lock_duration", "=", "30", ",", "requires_session", "=", "None", ",", "default_message_time_to_live", "=", "None", ",", "dead_lettering_on_message_expiration", "=", "None", "...
Create a subscription entity. :param topic_name: The name of the topic under which to create the subscription. :param subscription_name: The name of the new subscription. :type subscription_name: str :param lock_duration: The lock durection in seconds for each message in the subscription. :type lock_duration: int :param requires_session: Whether the subscription will be sessionful, and therefore require all message to have a Session ID and be received by a sessionful receiver. Default value is False. :type requires_session: bool :param default_message_time_to_live: The length of time a message will remain in the subscription before it is either discarded or moved to the dead letter queue. :type default_message_time_to_live: ~datetime.timedelta :param dead_lettering_on_message_expiration: Whether to move expired messages to the dead letter queue. Default value is False. :type dead_lettering_on_message_expiration: bool :param dead_lettering_on_filter_evaluation_exceptions: Whether to move messages that error on filtering into the dead letter queue. Default is False, and the messages will be discarded. :type dead_lettering_on_filter_evaluation_exceptions: bool :param max_delivery_count: The maximum number of times a message will attempt to be delivered before it is moved to the dead letter queue. :type max_delivery_count: int :param enable_batched_operations: :type: enable_batched_operations: bool :raises: ~azure.servicebus.common.errors.ServiceBusConnectionError if the namespace is not found. :raises: ~azure.common.AzureConflictHttpError if a queue of the same name already exists.
[ "Create", "a", "subscription", "entity", "." ]
python
test
61.714286
phoebe-project/phoebe2
phoebe/backend/oc_geometry.py
https://github.com/phoebe-project/phoebe2/blob/e64b8be683977064e2d55dd1b3ac400f64c3e379/phoebe/backend/oc_geometry.py#L118-L184
def nekmin(omega_in,q,x0=0.5,z0=0.5): '''Computes the position of the neck (minimal radius) in an contact_binary star1''' def Omega_xz(q,x,z): return 1./np.sqrt(x**2+z**2)+q/np.sqrt((1-x)**2+z**2)+(q+1)*x**2/2.-q*x def Omega_xy(q,x,y): return 1./np.sqrt(x**2+y**2)+q/np.sqrt((1-x)**2+y**2)+(q+1)*(x**2+y**2)/2.-q*x def dOmegadx_z(q,x,z): return -x/(x**2+z**2)**(3./2)+q*(1-x)/((1-x)**2+z**2)**(3./2.)+(q+1)*x-q def dOmegadx_y(q,x,y): return -x/(x**2+y**2)**(3./2)+q*(1-x)/((1-x)**2+y**2)**(3./2.)+(q+1)*x-q def dOmegadz(q,x,z): return -z/(x**2+z**2)**(3./2)-q*z/((1-x)**2+z**2)**(3./2.) def dOmegady(q,x,y): return -y/(x**2+y**2)**(3./2)-q*y/((1-x)**2+y**2)**(3./2.)+(q+1)*y def d2Omegadx2_z(q,x,z): return (2*x**2-z**2)/(x**2+z**2)**(5./2)+q*(2*(1-x)**2-z**2)/((1-x)**2+z**2)**(5./2)+(q+1) def d2Omegadx2_y(q,x,y): return (2*x**2-y**2)/(x**2+y**2)**(5./2)+q*(2*(1-x)**2-y**2)/((1-x)**2+y**2)**(5./2)+(q+1) def d2Omegadxdz(q,x,z): return 3*x*z/(x**2+z**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+z**2)**(5./2) def d2Omegadxdy(q,x,y): return 3*x*y/(x**2+y**2)**(5./2)-3*q*x*(1-x)/((1-x)**2+y**2)**(5./2) xz,z = x0,z0 dxz, dz = 1.,1. # find solution in xz plane while abs(dxz)>1e-8 and abs(dz)>1e-8: delz = 1. z=0.05 while abs(delz) > 0.000001: delom = omega_in - Omega_xz(q,xz,z) delz = delom/dOmegadz(q,xz,z) z = abs(z+delz) DN = np.array([[dOmegadx_z(q,xz,z),dOmegadz(q,xz,z)],[d2Omegadx2_z(q,xz,z),d2Omegadxdz(q,xz,z)]]) EN = np.array([omega_in-Omega_xz(q,xz,z),(-1)*dOmegadx_z(q,xz,z)]) a,b,c,d = DN[0][0],DN[0][1],DN[1][0],DN[1][1] if (a*d-b*c)!=0.: DNINV = 1./(a*d-b*c)*np.array([[d,(-1)*b],[(-1)*c,d]]) #DNINV = inv(DN) dd = np.dot(DNINV,EN) dxz,dz = dd[0],dd[1] xz=xz+dxz z=z+dz else: xz = xz+0.5 z = z+0.5 dxz = 1. dz = 1. return xz,z
[ "def", "nekmin", "(", "omega_in", ",", "q", ",", "x0", "=", "0.5", ",", "z0", "=", "0.5", ")", ":", "def", "Omega_xz", "(", "q", ",", "x", ",", "z", ")", ":", "return", "1.", "/", "np", ".", "sqrt", "(", "x", "**", "2", "+", "z", "**", "2...
Computes the position of the neck (minimal radius) in an contact_binary star1
[ "Computes", "the", "position", "of", "the", "neck", "(", "minimal", "radius", ")", "in", "an", "contact_binary", "star1" ]
python
train
30.58209
ponty/confduino
confduino/examples/board.py
https://github.com/ponty/confduino/blob/f4c261e5e84997f145a8bdd001f471db74c9054b/confduino/examples/board.py#L17-L40
def install_board_with_programmer(mcu, programmer, f_cpu=16000000, core='arduino', replace_existing=False, ): """install board with programmer.""" bunch = AutoBunch() board_id = '{mcu}_{f_cpu}_{programmer}'.format(f_cpu=f_cpu, mcu=mcu, programmer=programmer, ) bunch.name = '{mcu}@{f} Prog:{programmer}'.format(f=strfreq(f_cpu), mcu=mcu, programmer=programmer, ) bunch.upload.using = programmer bunch.build.mcu = mcu bunch.build.f_cpu = str(f_cpu) + 'L' bunch.build.core = core install_board(board_id, bunch, replace_existing=replace_existing)
[ "def", "install_board_with_programmer", "(", "mcu", ",", "programmer", ",", "f_cpu", "=", "16000000", ",", "core", "=", "'arduino'", ",", "replace_existing", "=", "False", ",", ")", ":", "bunch", "=", "AutoBunch", "(", ")", "board_id", "=", "'{mcu}_{f_cpu}_{pr...
install board with programmer.
[ "install", "board", "with", "programmer", "." ]
python
train
42.75
dropbox/stone
stone/frontend/parser.py
https://github.com/dropbox/stone/blob/2e95cbcd1c48e05cca68c919fd8d24adec6b0f58/stone/frontend/parser.py#L318-L327
def p_annotation_type(self, p): """annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT""" p[0] = AstAnnotationTypeDef( path=self.path, lineno=p.lineno(1), lexpos=p.lexpos(1), name=p[2], doc=p[5], params=p[6])
[ "def", "p_annotation_type", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "AstAnnotationTypeDef", "(", "path", "=", "self", ".", "path", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ",", "lexpos", "=", "p", ".", "lexpos", "(...
annotation_type : ANNOTATION_TYPE ID NL \ INDENT docsection field_list DEDENT
[ "annotation_type", ":", "ANNOTATION_TYPE", "ID", "NL", "\\", "INDENT", "docsection", "field_list", "DEDENT" ]
python
train
34.3
Duke-QCD/hic
hic/flow.py
https://github.com/Duke-QCD/hic/blob/9afb141735b1ac228d296a2349225d2bdcdb68f0/hic/flow.py#L13-L36
def qn(phi, *n): """ Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``. """ phi = np.ravel(phi) n = np.asarray(n) i_n_phi = np.zeros((n.size, phi.size), dtype=complex) np.outer(n, phi, out=i_n_phi.imag) qn = np.exp(i_n_phi, out=i_n_phi).sum(axis=1) if qn.size == 1: qn = qn[0] return qn
[ "def", "qn", "(", "phi", ",", "*", "n", ")", ":", "phi", "=", "np", ".", "ravel", "(", "phi", ")", "n", "=", "np", ".", "asarray", "(", "n", ")", "i_n_phi", "=", "np", ".", "zeros", "(", "(", "n", ".", "size", ",", "phi", ".", "size", ")"...
Calculate the complex flow vector `Q_n`. :param array-like phi: Azimuthal angles. :param int n: One or more harmonics to calculate. :returns: A single complex number if only one ``n`` was given or a complex array for multiple ``n``.
[ "Calculate", "the", "complex", "flow", "vector", "Q_n", "." ]
python
train
22
pyviz/holoviews
holoviews/core/pprint.py
https://github.com/pyviz/holoviews/blob/ae0dd2f3de448b0ca5e9065aabd6ef8d84c7e655/holoviews/core/pprint.py#L354-L364
def element_info(cls_or_slf, node, siblings, level, value_dims): """ Return the information summary for an Element. This consists of the dotted name followed by an value dimension names. """ info = cls_or_slf.component_type(node) if len(node.kdims) >= 1: info += cls_or_slf.tab + '[%s]' % ','.join(d.name for d in node.kdims) if value_dims and len(node.vdims) >= 1: info += cls_or_slf.tab + '(%s)' % ','.join(d.name for d in node.vdims) return level, [(level, info)]
[ "def", "element_info", "(", "cls_or_slf", ",", "node", ",", "siblings", ",", "level", ",", "value_dims", ")", ":", "info", "=", "cls_or_slf", ".", "component_type", "(", "node", ")", "if", "len", "(", "node", ".", "kdims", ")", ">=", "1", ":", "info", ...
Return the information summary for an Element. This consists of the dotted name followed by an value dimension names.
[ "Return", "the", "information", "summary", "for", "an", "Element", ".", "This", "consists", "of", "the", "dotted", "name", "followed", "by", "an", "value", "dimension", "names", "." ]
python
train
49.454545
tanghaibao/goatools
goatools/wr_tbl_class.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/wr_tbl_class.py#L64-L97
def wr_data(self, xlsx_data, row_i, worksheet): """Write data into xlsx worksheet.""" fld2fmt = self.vars.fld2fmt # User may specify to skip rows based on values in row prt_if = self.vars.prt_if # User may specify a subset of columns to print or # a column ordering different from the _fields seen in the namedtuple prt_flds = self.wbfmtobj.get_prt_flds() get_wbfmt = self.wbfmtobj.get_wbfmt if self.vars.sort_by is not None: xlsx_data = sorted(xlsx_data, key=self.vars.sort_by) try: for data_nt in xlsx_data: if prt_if is None or prt_if(data_nt): wbfmt = get_wbfmt(data_nt) # xlsxwriter.format.Format created w/add_format # Print an xlsx row by printing each column in order. for col_i, fld in enumerate(prt_flds): try: # If fld "format_txt" present, use val for formatting, but don't print. val = getattr(data_nt, fld, "") # Optional user-formatting of specific fields, eg, pval: "{:8.2e}" # If field value is empty (""), don't use fld2fmt if fld2fmt is not None and fld in fld2fmt and val != "" and val != "*": val = fld2fmt[fld].format(val) worksheet.write(row_i, col_i, val, wbfmt) except: raise RuntimeError(self._get_err_msg(row_i, col_i, fld, val, prt_flds)) row_i += 1 except RuntimeError as inst: import traceback traceback.print_exc() sys.stderr.write("\n **FATAL in wr_data: {MSG}\n\n".format(MSG=str(inst))) sys.exit(1) return row_i
[ "def", "wr_data", "(", "self", ",", "xlsx_data", ",", "row_i", ",", "worksheet", ")", ":", "fld2fmt", "=", "self", ".", "vars", ".", "fld2fmt", "# User may specify to skip rows based on values in row", "prt_if", "=", "self", ".", "vars", ".", "prt_if", "# User m...
Write data into xlsx worksheet.
[ "Write", "data", "into", "xlsx", "worksheet", "." ]
python
train
54.529412
waqasbhatti/astrobase
astrobase/varclass/varfeatures.py
https://github.com/waqasbhatti/astrobase/blob/2922a14619d183fb28005fa7d02027ac436f2265/astrobase/varclass/varfeatures.py#L68-L131
def stetson_jindex(ftimes, fmags, ferrs, weightbytimediff=False):
    '''Calculate the Stetson J variability index from consecutive
    observation pairs. Based on Nicole Loncke's work for her Planets and
    Life certificate at Princeton in 2014.

    Parameters
    ----------

    ftimes,fmags,ferrs : np.array
        The input mag/flux time-series with all non-finite elements removed.

    weightbytimediff : bool
        If True, each pair of mags is reweighted by the time difference
        between them following Fruth+ 2012 and Zhange+ 2003 (as seen in
        Sokolovsky+ 2017)::

            w_i = exp(- (t_i+1 - t_i)/ delta_t )

    Returns
    -------

    float
        The calculated Stetson J variability index.

    '''

    ndet = len(fmags)

    # need a minimum number of points for a meaningful index
    if ndet <= 9:
        LOGERROR('not enough detections in this magseries '
                 'to calculate stetson J index')
        return npnan

    # error-scaled residuals about the median, with the ndet/(ndet-1) prefactor
    prefactor = (ndet/(ndet - 1))
    resid = prefactor*(fmags - npmedian(fmags))/ferrs

    # roll by one so element i lines up with element i-1 (pairs consecutive mags)
    lagged = nproll(resid, 1)

    if weightbytimediff:
        timediffs = npdiff(ftimes)
        median_dt = npmedian(timediffs)
        pairweights = npexp(- timediffs/median_dt )
        pairprods = (pairweights*resid[1:]*lagged[1:])
    else:
        # drop the first element, which is the spurious x_0*x_n pair
        pairprods = (resid*lagged)[1:]

    return ( npsum(npsign(pairprods) * npsqrt(npabs(pairprods))) ) / ndet
[ "def", "stetson_jindex", "(", "ftimes", ",", "fmags", ",", "ferrs", ",", "weightbytimediff", "=", "False", ")", ":", "ndet", "=", "len", "(", "fmags", ")", "if", "ndet", ">", "9", ":", "# get the median and ndet", "medmag", "=", "npmedian", "(", "fmags", ...
This calculates the Stetson index for the magseries, based on consecutive pairs of observations. Based on Nicole Loncke's work for her Planets and Life certificate at Princeton in 2014. Parameters ---------- ftimes,fmags,ferrs : np.array The input mag/flux time-series with all non-finite elements removed. weightbytimediff : bool If this is True, the Stetson index for any pair of mags will be reweighted by the difference in times between them using the scheme in Fruth+ 2012 and Zhange+ 2003 (as seen in Sokolovsky+ 2017):: w_i = exp(- (t_i+1 - t_i)/ delta_t ) Returns ------- float The calculated Stetson J variability index.
[ "This", "calculates", "the", "Stetson", "index", "for", "the", "magseries", "based", "on", "consecutive", "pairs", "of", "observations", "." ]
python
valid
26.6875
icgood/pysasl
pysasl/__init__.py
https://github.com/icgood/pysasl/blob/241bdd349577cc99f05c4239755c307e6a46018c/pysasl/__init__.py#L393-L405
def get_client(self, name):
    """Like :meth:`.get`, but only mechanisms inheriting
    :class:`ClientMechanism` will be returned.

    Args:
        name: The SASL mechanism name.

    Returns:
        The mechanism object or ``None``

    """
    candidate = self.get(name)
    if isinstance(candidate, ClientMechanism):
        return candidate
    return None
[ "def", "get_client", "(", "self", ",", "name", ")", ":", "mech", "=", "self", ".", "get", "(", "name", ")", "return", "mech", "if", "isinstance", "(", "mech", ",", "ClientMechanism", ")", "else", "None" ]
Like :meth:`.get`, but only mechanisms inheriting :class:`ClientMechanism` will be returned. Args: name: The SASL mechanism name. Returns: The mechanism object or ``None``
[ "Like", ":", "meth", ":", ".", "get", "but", "only", "mechanisms", "inheriting", ":", "class", ":", "ClientMechanism", "will", "be", "returned", "." ]
python
train
27.538462
LISE-B26/pylabcontrol
build/lib/pylabcontrol/src/core/read_write_functions.py
https://github.com/LISE-B26/pylabcontrol/blob/67482e5157fcd1c40705e5c2cacfb93564703ed0/build/lib/pylabcontrol/src/core/read_write_functions.py#L95-L160
def save_b26_file(filename, instruments=None, scripts=None, probes=None, overwrite=False, verbose=False):
    """
    Save instruments, scripts and probes as a json file.

    Args:
        filename: path of the output .b26 (json) file
        instruments: optional dict of instrument settings to merge in
        scripts: optional dict of script settings to merge in
        probes: dictionary of the form
            {instrument_name : 'probe_1_of_instrument,probe_2_of_instrument,...'}
        overwrite: if False and the file exists, merge into the existing
            contents; if True, start from an empty dictionary
        verbose: if True, print progress information

    Returns:
        None. Writes the merged dictionary to `filename`, creating parent
        directories as needed, unless there is nothing to write.
    """
    # if overwrite is False, load existing data and merge new entries into it
    if os.path.isfile(filename) and not overwrite:
        data_dict = load_b26_file(filename)
    else:
        data_dict = {}

    if instruments is not None:
        if 'instruments' in data_dict:
            data_dict['instruments'].update(instruments)
        else:
            data_dict['instruments'] = instruments

    if scripts is not None:
        if 'scripts' in data_dict:
            data_dict['scripts'].update(scripts)
        else:
            data_dict['scripts'] = scripts

    if probes is not None:
        probe_instruments = list(probes.keys())
        if 'probes' in data_dict:
            # all the instruments required for old and new probes
            probe_instruments = set(probe_instruments + list(data_dict['probes'].keys()))
        else:
            data_dict.update({'probes': {}})

        for instrument in probe_instruments:
            if instrument in data_dict['probes'] and instrument in probes:
                # merge the comma-separated probe lists, dropping duplicates
                data_dict['probes'][instrument] = ','.join(set(
                    data_dict['probes'][instrument].split(',') + probes[instrument].split(',')))
            else:
                # NOTE(review): this replaces/extends the whole probes dict for
                # instruments present on only one side -- preserved as-is.
                data_dict['probes'].update(probes)

    if verbose:
        print(('writing ', filename))

    if data_dict != {}:
        dirname = os.path.dirname(filename)
        if verbose:
            print(('filename', filename))
            print(('exists', os.path.exists(dirname)))
        # create folder if it doesn't exist; guard against an empty dirname
        # (bare filename in the current directory), where makedirs('') raises
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
        with open(filename, 'w') as outfile:
            json.dump(data_dict, outfile, indent=4)
[ "def", "save_b26_file", "(", "filename", ",", "instruments", "=", "None", ",", "scripts", "=", "None", ",", "probes", "=", "None", ",", "overwrite", "=", "False", ",", "verbose", "=", "False", ")", ":", "# if overwrite is false load existing data and append to new...
save instruments, scripts and probes as a json file Args: filename: instruments: scripts: probes: dictionary of the form {instrument_name : probe_1_of_intrument, probe_2_of_intrument, ...} Returns:
[ "save", "instruments", "scripts", "and", "probes", "as", "a", "json", "file", "Args", ":", "filename", ":", "instruments", ":", "scripts", ":", "probes", ":", "dictionary", "of", "the", "form", "{", "instrument_name", ":", "probe_1_of_intrument", "probe_2_of_int...
python
train
35.454545
catch22/pw
pw/__main__.py
https://github.com/catch22/pw/blob/2452924bbdccad28b21290b6ce062809c3d1c5f2/pw/__main__.py#L91-L182
def pw(
    ctx,
    key_pattern,
    user_pattern,
    mode,
    strict_flag,
    user_flag,
    file,
    edit_subcommand,
    gen_subcommand,
):
    """Search for USER and KEY in GPG-encrypted password file.

    CLI entry point: dispatches to the generate/edit subcommands when
    requested, otherwise loads the password store from ``file``, searches
    it, and prints (or copies to clipboard) the matching records according
    to ``mode`` (RAW / ECHO / COPY).
    """
    # install silent Ctrl-C handler
    def handle_sigint(*_):
        click.echo()
        ctx.exit(1)

    signal.signal(signal.SIGINT, handle_sigint)

    # invoke a subcommand?
    if gen_subcommand:
        # in gen mode, the positional KEY argument doubles as the length
        length = int(key_pattern) if key_pattern else None
        generate_password(mode, length)
        return
    elif edit_subcommand:
        launch_editor(ctx, file)
        return

    # verify that database file is present
    if not os.path.exists(file):
        click.echo("error: password store not found at '%s'" % file, err=True)
        ctx.exit(1)

    # load database
    store = Store.load(file)

    # if no user query provided, split key query according to right-most "@" sign
    # (since usernames are typically email addresses)
    if not user_pattern:
        user_pattern, _, key_pattern = key_pattern.rpartition("@")

    # search database
    results = store.search(key_pattern, user_pattern)
    results = list(results)

    # if strict flag is enabled, check that precisely a single record was found
    if strict_flag and len(results) != 1:
        click.echo(
            "error: multiple or no records found (but using --strict flag)", err=True
        )
        ctx.exit(2)

    # raw mode? emit only the password (or username) of every match
    if mode == Mode.RAW:
        for entry in results:
            click.echo(entry.user if user_flag else entry.password)
        return

    # print results
    for idx, entry in enumerate(results):
        # start with key and user
        line = highlight_match(key_pattern, entry.key)
        if entry.user:
            line += ": " + highlight_match(user_pattern, entry.user)

        # add password or copy&paste success message
        if mode == Mode.ECHO and not user_flag:
            line += " | " + style_password(entry.password)
        elif mode == Mode.COPY and idx == 0:
            # only the first match is copied to the clipboard
            try:
                import pyperclip

                pyperclip.copy(entry.user if user_flag else entry.password)
                result = style_success(
                    "*** %s COPIED TO CLIPBOARD ***"
                    % ("USERNAME" if user_flag else "PASSWORD")
                )
            except ImportError:
                result = style_error('*** PYTHON PACKAGE "PYPERCLIP" NOT FOUND ***')
            line += " | " + result

        # add notes
        if entry.notes:
            if idx == 0:
                # first match: show the full notes block below the entry
                line += "\n"
                line += "\n".join(" " + line for line in entry.notes.splitlines())
            else:
                # other matches: show only the first notes line, truncated
                lines = entry.notes.splitlines()
                line += " | " + lines[0]
                if len(lines) > 1:
                    line += " (...)"

        click.echo(line)
[ "def", "pw", "(", "ctx", ",", "key_pattern", ",", "user_pattern", ",", "mode", ",", "strict_flag", ",", "user_flag", ",", "file", ",", "edit_subcommand", ",", "gen_subcommand", ",", ")", ":", "# install silent Ctrl-C handler", "def", "handle_sigint", "(", "*", ...
Search for USER and KEY in GPG-encrypted password file.
[ "Search", "for", "USER", "and", "KEY", "in", "GPG", "-", "encrypted", "password", "file", "." ]
python
train
30.163043
gbowerman/azurerm
azurerm/restfns.py
https://github.com/gbowerman/azurerm/blob/79d40431d3b13f8a36aadbff5029888383d72674/azurerm/restfns.py#L214-L249
def do_ams_post(endpoint, path, body, access_token, rformat="json", ds_min_version="3.0;NetFx"):
    '''Do a AMS HTTP POST request and return JSON.

    Args:
        endpoint (str): Azure Media Services Initial Endpoint.
        path (str): Azure Media Services Endpoint Path.
        body (str): Azure Media Services Content Body.
        access_token (str): A valid Azure authentication token.
        rformat (str): A required JSON Accept Format.
        ds_min_version (str): A required DS MIN Version.

    Returns:
        HTTP response. JSON body.
    '''
    # defaults correspond to the plain "json" format
    ds_version = dsversion_min
    content_type = json_acceptformat
    accept = json_acceptformat
    if rformat == "json_only":
        ds_version = ds_min_version
        content_type = json_only_acceptformat
    if rformat == "xml":
        content_type = xml_acceptformat
        accept = xml_acceptformat + ",application/xml"
    headers = {
        "Content-Type": content_type,
        "DataServiceVersion": ds_version,
        "MaxDataServiceVersion": dsversion_max,
        "Accept": accept,
        "Accept-Charset" : charset,
        "Authorization": "Bearer " + access_token,
        "x-ms-version" : xmsversion,
    }
    response = requests.post(endpoint, data=body, headers=headers, allow_redirects=False)

    # AMS response to the first call can be a redirect,
    # so we handle it here to make it transparent for the caller...
    if response.status_code == 301:
        response = requests.post(''.join([response.headers['location'], path]),
                                 data=body, headers=headers)
    return response
[ "def", "do_ams_post", "(", "endpoint", ",", "path", ",", "body", ",", "access_token", ",", "rformat", "=", "\"json\"", ",", "ds_min_version", "=", "\"3.0;NetFx\"", ")", ":", "min_ds", "=", "dsversion_min", "content_acceptformat", "=", "json_acceptformat", "acceptf...
Do a AMS HTTP POST request and return JSON. Args: endpoint (str): Azure Media Services Initial Endpoint. path (str): Azure Media Services Endpoint Path. body (str): Azure Media Services Content Body. access_token (str): A valid Azure authentication token. rformat (str): A required JSON Accept Format. ds_min_version (str): A required DS MIN Version. Returns: HTTP response. JSON body.
[ "Do", "a", "AMS", "HTTP", "POST", "request", "and", "return", "JSON", ".", "Args", ":", "endpoint", "(", "str", ")", ":", "Azure", "Media", "Services", "Initial", "Endpoint", ".", "path", "(", "str", ")", ":", "Azure", "Media", "Services", "Endpoint", ...
python
train
45.638889
vtkiorg/vtki
vtki/filters.py
https://github.com/vtkiorg/vtki/blob/5ccad7ae6d64a03e9594c9c7474c8aab3eb22dd1/vtki/filters.py#L786-L811
def split_bodies(dataset, label=False):
    """Find, label, and split connected bodies/volumes.

    This splits different connected bodies into blocks in a MultiBlock
    dataset.

    Parameters
    ----------
    label : bool
        A flag on whether to keep the ID arrays given by the
        ``connectivity`` filter.
    """
    # label connected regions, then pull each region out by its id
    labeled = dataset.connectivity()
    region_ids = labeled.cell_arrays['RegionId']
    bodies = vtki.MultiBlock()
    for region in np.unique(region_ids):
        body = labeled.threshold([region - 0.5, region + 0.5], scalars='RegionId')
        if not label:
            # strange behavior:
            # must use this method rather than deleting from the point_arrays
            # or else object is collected.
            body._remove_cell_scalar('RegionId')
            body._remove_point_scalar('RegionId')
        bodies.append(body)
    return bodies
[ "def", "split_bodies", "(", "dataset", ",", "label", "=", "False", ")", ":", "# Get the connectivity and label different bodies", "labeled", "=", "dataset", ".", "connectivity", "(", ")", "classifier", "=", "labeled", ".", "cell_arrays", "[", "'RegionId'", "]", "b...
Find, label, and split connected bodies/volumes. This splits different connected bodies into blocks in a MultiBlock dataset. Parameters ---------- label : bool A flag on whether to keep the ID arrays given by the ``connectivity`` filter.
[ "Find", "label", "and", "split", "connected", "bodies", "/", "volumes", ".", "This", "splits", "different", "connected", "bodies", "into", "blocks", "in", "a", "MultiBlock", "dataset", "." ]
python
train
38.807692
IBM/ibm-cos-sdk-python-s3transfer
ibm_s3transfer/aspera/manager.py
https://github.com/IBM/ibm-cos-sdk-python-s3transfer/blob/24ba53137213e26e6b8fc2c3ec1e8198d507d22b/ibm_s3transfer/aspera/manager.py#L245-L254
def _raw_aspera_metadata(self, bucket): ''' get the Aspera connection details on Aspera enabled buckets ''' response = self._client.get_bucket_aspera(Bucket=bucket) # Parse metadata from response aspera_access_key = response['AccessKey']['Id'] aspera_secret_key = response['AccessKey']['Secret'] ats_endpoint = response['ATSEndpoint'] return aspera_access_key, aspera_secret_key, ats_endpoint
[ "def", "_raw_aspera_metadata", "(", "self", ",", "bucket", ")", ":", "response", "=", "self", ".", "_client", ".", "get_bucket_aspera", "(", "Bucket", "=", "bucket", ")", "# Parse metadata from response", "aspera_access_key", "=", "response", "[", "'AccessKey'", "...
get the Aspera connection details on Aspera enabled buckets
[ "get", "the", "Aspera", "connection", "details", "on", "Aspera", "enabled", "buckets" ]
python
train
44.1
koszullab/metaTOR
metator/scripts/hicstuff.py
https://github.com/koszullab/metaTOR/blob/0c1203d1dffedfa5ea380c0335b4baa9cfb7e89a/metator/scripts/hicstuff.py#L1452-L1480
def positions_to_contigs(positions):
    """Label contigs according to relative positions

    Every zero in ``positions`` marks the start of a new contig; each entry
    receives the index of the contig it belongs to.

    Parameters
    ----------
    positions : list or array_like
        A piece-wise ordered list of integers representing positions

    Returns
    -------
    contig_labels : numpy.ndarray
        The list of contig labels
    """
    labels = np.zeros_like(positions)
    current_contig = 0
    for idx, pos in enumerate(positions):
        if pos == 0:
            current_contig += 1
        labels[idx] = current_contig
    return labels
[ "def", "positions_to_contigs", "(", "positions", ")", ":", "contig_labels", "=", "np", ".", "zeros_like", "(", "positions", ")", "contig_index", "=", "0", "for", "i", ",", "p", "in", "enumerate", "(", "positions", ")", ":", "if", "p", "==", "0", ":", "...
Label contigs according to relative positions Given a list of positions, return an ordered list of labels reflecting where the positions array started over (and presumably a new contig began). Parameters ---------- positions : list or array_like A piece-wise ordered list of integers representing positions Returns ------- contig_labels : numpy.ndarray The list of contig labels
[ "Label", "contigs", "according", "to", "relative", "positions" ]
python
train
23.689655
rwl/pylon
pylon/io/psse.py
https://github.com/rwl/pylon/blob/916514255db1ae1661406f0283df756baf960d14/pylon/io/psse.py#L387-L397
def write_case_data(self, file):
    """ Writes case data to file.
    """
    # header: change code 0, system MVA base, and a creation time stamp
    timestamp = time.strftime("%Y%m%d%H%M", time.gmtime())
    lines = [
        "%d, %8.2f, 30 / PSS(tm)E-30 RAW created by Pylon (%s).\n"
        % (0, self.case.base_mva, timestamp),
        "Modified by Hantao Cui, CURENT, UTK\n ",
        "%s, %d BUSES, %d BRANCHES\n"
        % (self.case.name, len(self.case.buses), len(self.case.branches)),
    ]
    file.writelines(lines)
[ "def", "write_case_data", "(", "self", ",", "file", ")", ":", "change_code", "=", "0", "s_base", "=", "self", ".", "case", ".", "base_mva", "timestr", "=", "time", ".", "strftime", "(", "\"%Y%m%d%H%M\"", ",", "time", ".", "gmtime", "(", ")", ")", "file...
Writes case data to file.
[ "Writes", "case", "data", "to", "file", "." ]
python
train
48.181818
eht16/django-axes-login-actions
axes_login_actions/signals.py
https://github.com/eht16/django-axes-login-actions/blob/1478e85831583eef8b4cb628a9744e5a16f9ef5a/axes_login_actions/signals.py#L15-L26
def import_dotted_path(path):
    """
    Takes a dotted path to a member name in a module, and returns the member
    after importing it.
    """
    # stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)
    try:
        mod_path, attr_name = path.rsplit(".", 1)
        return getattr(import_module(mod_path), attr_name)
    except (ValueError, ImportError, AttributeError) as e:
        raise ImportError('Could not import the name: {}: {}'.format(path, e))
[ "def", "import_dotted_path", "(", "path", ")", ":", "# stolen from Mezzanine (mezzanine.utils.importing.import_dotted_path)", "try", ":", "module_path", ",", "member_name", "=", "path", ".", "rsplit", "(", "\".\"", ",", "1", ")", "module", "=", "import_module", "(", ...
Takes a dotted path to a member name in a module, and returns the member after importing it.
[ "Takes", "a", "dotted", "path", "to", "a", "member", "name", "in", "a", "module", "and", "returns", "the", "member", "after", "importing", "it", "." ]
python
train
41.666667
helixyte/everest
everest/representers/base.py
https://github.com/helixyte/everest/blob/70c9b93c3061db5cb62428349d18b8fb8566411b/everest/representers/base.py#L215-L224
def create_from_resource_class(cls, resource_class):
    """
    Creates a new representer for the given resource class.

    The representer obtains a reference to the (freshly created or looked
    up) mapping for the resource class.
    """
    registry = get_mapping_registry(cls.content_type)
    mapping = registry.find_or_create_mapping(resource_class)
    return cls(resource_class, mapping)
[ "def", "create_from_resource_class", "(", "cls", ",", "resource_class", ")", ":", "mp_reg", "=", "get_mapping_registry", "(", "cls", ".", "content_type", ")", "mp", "=", "mp_reg", ".", "find_or_create_mapping", "(", "resource_class", ")", "return", "cls", "(", "...
Creates a new representer for the given resource class. The representer obtains a reference to the (freshly created or looked up) mapping for the resource class.
[ "Creates", "a", "new", "representer", "for", "the", "given", "resource", "class", "." ]
python
train
40.8
django-extensions/django-extensions
django_extensions/management/commands/merge_model_instances.py
https://github.com/django-extensions/django-extensions/blob/7e0bef97ea6cb7f9eea5e2528e3a985a83a7b9b8/django_extensions/management/commands/merge_model_instances.py#L132-L222
def merge_model_instances(self, primary_object, alias_objects):
    """
    Merge several model instances into one, the `primary_object`.

    Use this function to merge model objects and migrate all of
    the related fields from the alias objects to the primary object.
    Each alias object has its many-to-many, reverse-FK, one-to-one/FK and
    generic-relation references re-pointed at `primary_object` and is then
    deleted.

    Returns a ``(primary_object, deleted_objects, deleted_objects_count)``
    tuple.
    """
    generic_fields = get_generic_fields()

    # get related fields (only fields that are relations)
    related_fields = list(filter(
        lambda x: x.is_relation is True,
        primary_object._meta.get_fields()))

    many_to_many_fields = list(filter(
        lambda x: x.many_to_many is True, related_fields))

    # everything that is a relation but not M2M (FK, O2O, reverse relations)
    related_fields = list(filter(
        lambda x: x.many_to_many is False, related_fields))

    # Loop through all alias objects and migrate their references to the
    # primary object
    deleted_objects = []
    deleted_objects_count = 0
    for alias_object in alias_objects:
        # Migrate all foreign key references from alias object to primary
        # object.
        for many_to_many_field in many_to_many_fields:
            alias_varname = many_to_many_field.name
            related_objects = getattr(alias_object, alias_varname)
            for obj in related_objects.all():
                try:
                    # Handle regular M2M relationships.
                    getattr(alias_object, alias_varname).remove(obj)
                    getattr(primary_object, alias_varname).add(obj)
                except AttributeError:
                    # Handle M2M relationships with a 'through' model.
                    # This does not delete the 'through model.
                    # TODO: Allow the user to delete a duplicate 'through' model.
                    through_model = getattr(alias_object, alias_varname).through
                    kwargs = {
                        many_to_many_field.m2m_reverse_field_name(): obj,
                        many_to_many_field.m2m_field_name(): alias_object,
                    }
                    through_model_instances = through_model.objects.filter(**kwargs)
                    for instance in through_model_instances:
                        # Re-attach the through model to the primary_object
                        setattr(
                            instance,
                            many_to_many_field.m2m_field_name(),
                            primary_object)
                        instance.save()
                        # TODO: Here, try to delete duplicate instances that are
                        # disallowed by a unique_together constraint

        for related_field in related_fields:
            if related_field.one_to_many:
                # reverse FK: re-point each child object at the primary
                alias_varname = related_field.get_accessor_name()
                related_objects = getattr(alias_object, alias_varname)
                for obj in related_objects.all():
                    field_name = related_field.field.name
                    setattr(obj, field_name, primary_object)
                    obj.save()
            elif related_field.one_to_one or related_field.many_to_one:
                alias_varname = related_field.name
                related_object = getattr(alias_object, alias_varname)
                primary_related_object = getattr(primary_object, alias_varname)
                if primary_related_object is None:
                    # primary has no value yet -- take over the alias's relation
                    setattr(primary_object, alias_varname, related_object)
                    primary_object.save()
                elif related_field.one_to_one:
                    # both sides populated on a one-to-one: drop the alias's copy
                    self.stdout.write("Deleted {} with id {}\n".format(
                        related_object, related_object.id))
                    related_object.delete()

        for field in generic_fields:
            # re-point generic foreign keys (content-type + object-pk pairs)
            filter_kwargs = {}
            filter_kwargs[field.fk_field] = alias_object._get_pk_val()
            filter_kwargs[field.ct_field] = field.get_content_type(alias_object)
            related_objects = field.model.objects.filter(**filter_kwargs)
            for generic_related_object in related_objects:
                setattr(generic_related_object, field.name, primary_object)
                generic_related_object.save()

        if alias_object.id:
            # finally delete the (now unreferenced) alias object itself
            deleted_objects += [alias_object]
            self.stdout.write("Deleted {} with id {}\n".format(
                alias_object, alias_object.id))
            alias_object.delete()
            deleted_objects_count += 1

    return primary_object, deleted_objects, deleted_objects_count
[ "def", "merge_model_instances", "(", "self", ",", "primary_object", ",", "alias_objects", ")", ":", "generic_fields", "=", "get_generic_fields", "(", ")", "# get related fields", "related_fields", "=", "list", "(", "filter", "(", "lambda", "x", ":", "x", ".", "i...
Merge several model instances into one, the `primary_object`. Use this function to merge model objects and migrate all of the related fields from the alias objects the primary object.
[ "Merge", "several", "model", "instances", "into", "one", "the", "primary_object", ".", "Use", "this", "function", "to", "merge", "model", "objects", "and", "migrate", "all", "of", "the", "related", "fields", "from", "the", "alias", "objects", "the", "primary",...
python
train
50.89011
JoelBender/bacpypes
py34/bacpypes/service/object.py
https://github.com/JoelBender/bacpypes/blob/4111b8604a16fa2b7f80d8104a43b9f3e28dfc78/py34/bacpypes/service/object.py#L193-L223
def read_property_to_result_element(obj, propertyIdentifier, propertyArrayIndex=None):
    """Read the specified property of the object, with the optional array
    index, and wrap the outcome (value or access error) in a
    ReadAccessResultElement."""
    if _debug: read_property_to_result_element._debug("read_property_to_result_element %s %r %r", obj, propertyIdentifier, propertyArrayIndex)

    # the choice holds either the property value or an access error
    read_result = ReadAccessResultElementChoice()
    try:
        if not obj:
            raise ExecutionError(errorClass='object', errorCode='unknownObject')
        read_result.propertyValue = read_property_to_any(obj, propertyIdentifier, propertyArrayIndex)
        if _debug: read_property_to_result_element._debug(" - success")
    except PropertyError as error:
        if _debug: read_property_to_result_element._debug(" - error: %r", error)
        read_result.propertyAccessError = ErrorType(errorClass='property', errorCode='unknownProperty')
    except ExecutionError as error:
        if _debug: read_property_to_result_element._debug(" - error: %r", error)
        read_result.propertyAccessError = ErrorType(errorClass=error.errorClass, errorCode=error.errorCode)

    # wrap the choice together with the property identity
    element = ReadAccessResultElement(
        propertyIdentifier=propertyIdentifier,
        propertyArrayIndex=propertyArrayIndex,
        readResult=read_result,
    )
    if _debug: read_property_to_result_element._debug(" - read_access_result_element: %r", element)

    return element
[ "def", "read_property_to_result_element", "(", "obj", ",", "propertyIdentifier", ",", "propertyArrayIndex", "=", "None", ")", ":", "if", "_debug", ":", "read_property_to_result_element", ".", "_debug", "(", "\"read_property_to_result_element %s %r %r\"", ",", "obj", ",", ...
Read the specified property of the object, with the optional array index, and cast the result into an Any object.
[ "Read", "the", "specified", "property", "of", "the", "object", "with", "the", "optional", "array", "index", "and", "cast", "the", "result", "into", "an", "Any", "object", "." ]
python
train
50.516129
CxAalto/gtfspy
gtfspy/routing/node_profile_analyzer_time_and_veh_legs.py
https://github.com/CxAalto/gtfspy/blob/bddba4b74faae6c1b91202f19184811e326547e5/gtfspy/routing/node_profile_analyzer_time_and_veh_legs.py#L47-L56
def _truncate_colormap(cmap, minval=0.0, maxval=1.0, n=100):
    """
    Truncates a colormap to use.
    Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
    """
    # sample n colors from the [minval, maxval] span of the original map
    samples = cmap(numpy.linspace(minval, maxval, n))
    trunc_name = 'trunc({n},{a:.2f},{b:.2f})'.format(n=cmap.name, a=minval, b=maxval)
    return LinearSegmentedColormap.from_list(trunc_name, samples)
[ "def", "_truncate_colormap", "(", "cmap", ",", "minval", "=", "0.0", ",", "maxval", "=", "1.0", ",", "n", "=", "100", ")", ":", "new_cmap", "=", "LinearSegmentedColormap", ".", "from_list", "(", "'trunc({n},{a:.2f},{b:.2f})'", ".", "format", "(", "n", "=", ...
Truncates a colormap to use. Code originall from http://stackoverflow.com/questions/18926031/how-to-extract-a-subset-of-a-colormap-as-a-new-colormap-in-matplotlib
[ "Truncates", "a", "colormap", "to", "use", ".", "Code", "originall", "from", "http", ":", "//", "stackoverflow", ".", "com", "/", "questions", "/", "18926031", "/", "how", "-", "to", "-", "extract", "-", "a", "-", "subset", "-", "of", "-", "a", "-", ...
python
valid
44
ThreatConnect-Inc/tcex
tcex/tcex.py
https://github.com/ThreatConnect-Inc/tcex/blob/dd4d7a1ef723af1561687120191886b9a2fd4b47/tcex/tcex.py#L964-L980
def safe_tag(self, tag, errors='strict'):
    """URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API.

    Args:
        tag (string): The tag to be truncated

    Returns:
        (string): The truncated tag
    """
    if tag is None:
        return tag
    try:
        # handle unicode characters and url encode tag value
        tag = quote(self.s(tag, errors=errors), safe='~')[:128]
    except KeyError as e:
        self.log.warning('Failed converting tag to safetag ({})'.format(e))
    return tag
[ "def", "safe_tag", "(", "self", ",", "tag", ",", "errors", "=", "'strict'", ")", ":", "if", "tag", "is", "not", "None", ":", "try", ":", "# handle unicode characters and url encode tag value", "tag", "=", "quote", "(", "self", ".", "s", "(", "tag", ",", ...
URL Encode and truncate tag to match limit (128 characters) of ThreatConnect API. Args: tag (string): The tag to be truncated Returns: (string): The truncated tag
[ "URL", "Encode", "and", "truncate", "tag", "to", "match", "limit", "(", "128", "characters", ")", "of", "ThreatConnect", "API", "." ]
python
train
35.470588
LinuxChristian/pyW215
pyW215/pyW215.py
https://github.com/LinuxChristian/pyW215/blob/63e50b8ee11bc38ed66554f9b92429b552dda550/pyW215/pyW215.py#L199-L222
def current_consumption(self):
    """Get the current power consumption in Watt.

    Returns 'N/A' when the reading cannot be retrieved. On a parse failure
    the error is logged and the raw (unparsed) value is returned unchanged,
    preserving the original behavior.
    """
    res = 'N/A'
    if self.use_legacy_protocol:
        # Use /my_cgi.cgi to retrieve current consumption
        try:
            res = self.fetchMyCgi()['Meter Watt']
        except Exception:  # narrowed from a bare except: best-effort read
            return 'N/A'
    else:
        try:
            res = self.SOAPAction('GetCurrentPowerConsumption', 'CurrentConsumption', self.moduleParameters("2"))
        except Exception:  # narrowed from a bare except: best-effort read
            return 'N/A'

    if res is None:
        return 'N/A'

    try:
        res = float(res)
    except ValueError:
        _LOGGER.error("Failed to retrieve current power consumption from SmartPlug")

    return res
[ "def", "current_consumption", "(", "self", ")", ":", "res", "=", "'N/A'", "if", "self", ".", "use_legacy_protocol", ":", "# Use /my_cgi.cgi to retrieve current consumption", "try", ":", "res", "=", "self", ".", "fetchMyCgi", "(", ")", "[", "'Meter Watt'", "]", "...
Get the current power consumption in Watt.
[ "Get", "the", "current", "power", "consumption", "in", "Watt", "." ]
python
train
30.416667
openvax/mhcflurry
mhcflurry/parallelism.py
https://github.com/openvax/mhcflurry/blob/deb7c1629111254b484a2711619eb2347db36524/mhcflurry/parallelism.py#L115-L188
def make_worker_pool( processes=None, initializer=None, initializer_kwargs_per_process=None, max_tasks_per_worker=None): """ Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. 
Returns ------- multiprocessing.Pool """ if not processes: processes = cpu_count() pool_kwargs = { 'processes': processes, } if max_tasks_per_worker: pool_kwargs["maxtasksperchild"] = max_tasks_per_worker if initializer: if initializer_kwargs_per_process: assert len(initializer_kwargs_per_process) == processes kwargs_queue = Queue() kwargs_queue_backup = Queue() for kwargs in initializer_kwargs_per_process: kwargs_queue.put(kwargs) kwargs_queue_backup.put(kwargs) pool_kwargs["initializer"] = worker_init_entry_point pool_kwargs["initargs"] = ( initializer, kwargs_queue, kwargs_queue_backup) else: pool_kwargs["initializer"] = initializer worker_pool = Pool(**pool_kwargs) print("Started pool: %s" % str(worker_pool)) pprint(pool_kwargs) return worker_pool
[ "def", "make_worker_pool", "(", "processes", "=", "None", ",", "initializer", "=", "None", ",", "initializer_kwargs_per_process", "=", "None", ",", "max_tasks_per_worker", "=", "None", ")", ":", "if", "not", "processes", ":", "processes", "=", "cpu_count", "(", ...
Convenience wrapper to create a multiprocessing.Pool. This function adds support for per-worker initializer arguments, which are not natively supported by the multiprocessing module. The motivation for this feature is to support allocating each worker to a (different) GPU. IMPLEMENTATION NOTE: The per-worker initializer arguments are implemented using a Queue. Each worker reads its arguments from this queue when it starts. When it terminates, it adds its initializer arguments back to the queue, so a future process can initialize itself using these arguments. There is one issue with this approach, however. If a worker crashes, it never repopulates the queue of initializer arguments. This will prevent any future worker from re-using those arguments. To deal with this issue we add a second 'backup queue'. This queue always contains the full set of initializer arguments: whenever a worker reads from it, it always pushes the pop'd args back to the end of the queue immediately. If the primary arg queue is ever empty, then workers will read from this backup queue. Parameters ---------- processes : int Number of workers. Default: num CPUs. initializer : function, optional Init function to call in each worker initializer_kwargs_per_process : list of dict, optional Arguments to pass to initializer function for each worker. Length of list must equal the number of workers. max_tasks_per_worker : int, optional Restart workers after this many tasks. Requires Python >=3.2. Returns ------- multiprocessing.Pool
[ "Convenience", "wrapper", "to", "create", "a", "multiprocessing", ".", "Pool", "." ]
python
train
37.135135
clusterpoint/python-client-api
pycps/response.py
https://github.com/clusterpoint/python-client-api/blob/fabf9bd8355aa54ba08fd6649e48f16e2c35eacd/pycps/response.py#L324-L335
def get_words(self): """ Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term. """ return dict([(word_list.attrib['to'], dict([(word.text, word.attrib['count']) for word in word_list.findall('word')])) for word_list in self._content.findall('list')])
[ "def", "get_words", "(", "self", ")", ":", "return", "dict", "(", "[", "(", "word_list", ".", "attrib", "[", "'to'", "]", ",", "dict", "(", "[", "(", "word", ".", "text", ",", "word", ".", "attrib", "[", "'count'", "]", ")", "for", "word", "in", ...
Get words matching the request search terms. Returns: A dict in form: {<search term>: {<matching word>: <number of times this word is found in the Storage> } // Repeated for every matching word. } // Repeated for every search term.
[ "Get", "words", "matching", "the", "request", "search", "terms", "." ]
python
train
51.583333
rhelmot/nclib
nclib/netcat.py
https://github.com/rhelmot/nclib/blob/6147779766557ee4fafcbae683bdd2f74157e825/nclib/netcat.py#L697-L710
def send(self, s): """ Sends all the given data to the socket. Aliases: write, put, sendall, send_all """ self._print_header('======== Sending ({0}) ========'.format(len(s))) self._log_send(s) out = len(s) while s: s = s[self._send(s):] return out
[ "def", "send", "(", "self", ",", "s", ")", ":", "self", ".", "_print_header", "(", "'======== Sending ({0}) ========'", ".", "format", "(", "len", "(", "s", ")", ")", ")", "self", ".", "_log_send", "(", "s", ")", "out", "=", "len", "(", "s", ")", "...
Sends all the given data to the socket. Aliases: write, put, sendall, send_all
[ "Sends", "all", "the", "given", "data", "to", "the", "socket", "." ]
python
train
22.928571
stevepeak/inquiry
inquiry/garden.py
https://github.com/stevepeak/inquiry/blob/f6ea435c302560ba19985b5d4ce2c97e2f321508/inquiry/garden.py#L98-L256
def _harvest_validate(self, userkwargs): """Validate and Plant user provided arguments - Go through and plants the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer wise) - Extract negatives "!" arguments """ # the valideer to parse the # user arguemnts when watering parser = {} userkwargs.update(self.network_kwargs) # a simple set of original provided argument keys (used in IGNORES) original_kwargs = set(map(lambda k: k.split('_')[1] if k.find('_')>-1 else k, userkwargs.keys())) # list of columns that are required from seeds requires = [] # ------------- # Clean up Aggs # ------------- for key in userkwargs.keys(): # agg example: "avg_total", "max_tax" if key.find('_') > 0: agg, base = tuple(key.split('_')) if base in userkwargs: if type(userkwargs[base]) is not list: userkwargs[base] = [(None, userkwargs[base])] userkwargs[base].append( (agg, userkwargs.pop(key)) ) else: userkwargs[base] = [(agg, userkwargs.pop(key))] # ----------------- # Process Arguments # ----------------- for key, seed in self.arguments.iteritems(): # -------------- # Argument Alias # -------------- if seed.get('alias') and key in userkwargs: # pop the value form the user kwargs (to change the key later) value = userkwargs.pop(key) if key in userkwargs else NotImplemented # for duplicate keys oldkey = key+"" # change the key key = seed.get('alias') # change the seed seed = get(self.arguments, seed.get('alias')) # set the new key:value if value is not NotImplemented: if key in userkwargs: raise valideer.ValidationError("Argument alias already specified for `%s` via `%s`" % (oldkey, key), oldkey) userkwargs[key] = value # can provide multiple arguments if key.endswith('[]'): multi = True key = key[:-2] else: multi = False # get value(s) from user if key in userkwargs: value = userkwargs.pop(key) elif seed.get('copy'): value = userkwargs.get(seed.get('copy')) else: value = seed.get('default') # no argument provided, lets continue) if value is 
None or value == []: if seed.get('required'): raise valideer.ValidationError("missing required property: %s" % key, key) else: continue # add requires requires.extend(array(get(seed, 'requires', []))) # ----------- # Inheritance # ----------- # not permited from arguements yet. would need to happen above the ""PROCESS ARGUMENT"" block # self._inherit(*array(get(seed, 'inherit', []))) if type(value) is list and type(value[0]) is tuple: # complex for v in value: ud, pd = self._harvest_args(key, seed, v, multi) userkwargs.update(ud) parser.update(pd) else: ud, pd = self._harvest_args(key, seed, value, multi) userkwargs.update(ud) parser.update(pd) # ------------ # Ignored Keys # ------------ for seed in self.seeds: ignores = set(array(get(seed, 'ignore'))) if ignores: if ignores & original_kwargs: if not get(seed, 'silent'): additionals = ignores & original_kwargs raise valideer.ValidationError("additional properties: %s" % ",".join(additionals), additionals) [userkwargs.pop(key) for key in ignores if key in userkwargs] # ------------------------- # Custom Operators (part 1) # ------------------------- operators = {} for key, value in userkwargs.items(): rk = key agg = None if key.find('_')>-1: agg, rk = tuple(key.split('_')) seed = self.arguments.get(rk, self.arguments.get(rk+'[]')) if seed: if type(value) is list: operators[key] = [] # need to remove the operator for validating new_values = [] for v in value: operator, v = self._operator(v, *seed.get('column', "").rsplit("::", 1)) new_values.append(v) operators[key].append((agg, operator) if agg else operator) userkwargs[key] = new_values else: operator, value = self._operator(value, *seed.get('column', "").rsplit("::", 1)) operators[key] = (agg, operator) if agg else operator userkwargs[key] = value # ----------------- # Plant Sort Method # ----------------- if 'sortby' in userkwargs: seed = self.arguments.get(userkwargs['sortby'].lower(), self.arguments.get(userkwargs['sortby'].lower()+'[]')) if seed: seed['id'] = 
str(userkwargs['sortby'].lower()) for r in set(requires): if userkwargs.get(r) is None: raise valideer.ValidationError("required property not set: %s" % r, r) # -------- # Validate # -------- parser = valideer.parse(parser, additional_properties=False) validated = parser.validate(userkwargs, adapt=self.navigator.adapter()) validated.update(self.network_kwargs) # operators validated # --------------------------- | -------------------------------- # { { # "type": ["!", "!"], "type": ['a', 'b'], # "total": "<", "total": "50", # "tax": ("avg, ">"), "tax": "1", # "time": None "time": "2014" # } } return operators, validated
[ "def", "_harvest_validate", "(", "self", ",", "userkwargs", ")", ":", "# the valideer to parse the", "# user arguemnts when watering", "parser", "=", "{", "}", "userkwargs", ".", "update", "(", "self", ".", "network_kwargs", ")", "# a simple set of original provided argum...
Validate and Plant user provided arguments - Go through and plants the seedlings for any user arguments provided. - Validate the arguments, cleaning and adapting (valideer wise) - Extract negatives "!" arguments
[ "Validate", "and", "Plant", "user", "provided", "arguments", "-", "Go", "through", "and", "plants", "the", "seedlings", "for", "any", "user", "arguments", "provided", ".", "-", "Validate", "the", "arguments", "cleaning", "and", "adapting", "(", "valideer", "wi...
python
train
40.874214
mcocdawc/chemcoord
src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py
https://github.com/mcocdawc/chemcoord/blob/95561ce387c142227c38fb14a1d182179aef8f5f/src/chemcoord/cartesian_coordinates/_cartesian_class_get_zmat.py#L637-L739
def get_grad_zmat(self, construction_table, as_function=True): r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. 
math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. """ if (construction_table.index != self.index).any(): message = "construction_table and self must use the same index" raise ValueError(message) c_table = construction_table.loc[:, ['b', 'a', 'd']] c_table = c_table.replace(constants.int_label) c_table = c_table.replace({k: v for v, k in enumerate(c_table.index)}) c_table = c_table.values.T X = self.loc[:, ['x', 'y', 'z']].values.T if X.dtype == np.dtype('i8'): X = X.astype('f8') err, row, grad_C = transformation.get_grad_C(X, c_table) if err == ERR_CODE_InvalidReference: rename = dict(enumerate(self.index)) i = rename[row] b, a, d = construction_table.loc[i, ['b', 'a', 'd']] raise InvalidReference(i=i, b=b, a=a, d=d) if as_function: return partial(xyz_functions.apply_grad_zmat_tensor, grad_C, construction_table) else: return grad_C
[ "def", "get_grad_zmat", "(", "self", ",", "construction_table", ",", "as_function", "=", "True", ")", ":", "if", "(", "construction_table", ".", "index", "!=", "self", ".", "index", ")", ".", "any", "(", ")", ":", "message", "=", "\"construction_table and se...
r"""Return the gradient for the transformation to a Zmatrix. If ``as_function`` is True, a function is returned that can be directly applied onto instances of :class:`~Cartesian`, which contain the applied distortions in cartesian space. In this case the user does not have to worry about indexing and correct application of the tensor product. Basically this is the function :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. If ``as_function`` is False, a ``(3, n, n, 3)`` tensor is returned, which contains the values of the derivatives. Since a ``n * 3`` matrix is deriven after a ``n * 3`` matrix, it is important to specify the used rules for indexing the resulting tensor. The rule is very simple: The indices of the numerator are used first then the indices of the denominator get swapped and appended: .. math:: \left( \frac{\partial \mathbf{Y}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{Y}_{i, j}}{\partial \mathbf{X}_{l, k}} Applying this rule to an example function: .. math:: f \colon \mathbb{R}^3 \rightarrow \mathbb{R} Gives as derivative the known row-vector gradient: .. math:: (\nabla f)_{1, i} = \frac{\partial f}{\partial x_i} \qquad i \in \{1, 2, 3\} .. note:: The row wise alignment of the XYZ files makes sense for these CSV like files. But it is mathematically advantageous and sometimes (depending on the memory layout) numerically better to use a column wise alignment of the coordinates. In this function the resulting tensor assumes a ``3 * n`` array for the coordinates. If .. math:: \mathbf{X}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n \\ \mathbf{C}_{i, j} &\qquad 1 \leq i \leq 3, \quad 1 \leq j \leq n denote the positions in cartesian and Zmatrix space, The complete tensor may be written as: .. 
math:: \left( \frac{\partial \mathbf{C}}{\partial \mathbf{X}} \right)_{i, j, k, l} = \frac{\partial \mathbf{C}_{i, j}}{\partial \mathbf{X}_{l, k}} Args: construction_table (pandas.DataFrame): as_function (bool): Return a tensor or :func:`xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments. Returns: (func, np.array): Depending on ``as_function`` return a tensor or :func:`~chemcoord.xyz_functions.apply_grad_zmat_tensor` with partially replaced arguments.
[ "r", "Return", "the", "gradient", "for", "the", "transformation", "to", "a", "Zmatrix", "." ]
python
train
37.805825
inasafe/inasafe
safe/definitions/utilities.py
https://github.com/inasafe/inasafe/blob/831d60abba919f6d481dc94a8d988cc205130724/safe/definitions/utilities.py#L297-L320
def get_name(key): """Given a keyword, try to get the name of it. .. versionadded:: 4.2 Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return none. keyword = 'layer_purpose' kio = safe.utilities.keyword_io.Keyword_IO() name = kio.get_name(keyword) print name :param key: A keyword key. :type key: str :returns: The name of the keyword :rtype: str """ definition_dict = definition(key) if definition_dict: return definition_dict.get('name', key) # Else, return the keyword return key
[ "def", "get_name", "(", "key", ")", ":", "definition_dict", "=", "definition", "(", "key", ")", "if", "definition_dict", ":", "return", "definition_dict", ".", "get", "(", "'name'", ",", "key", ")", "# Else, return the keyword", "return", "key" ]
Given a keyword, try to get the name of it. .. versionadded:: 4.2 Definition dicts are defined in keywords.py. We try to return the name if present, otherwise we return none. keyword = 'layer_purpose' kio = safe.utilities.keyword_io.Keyword_IO() name = kio.get_name(keyword) print name :param key: A keyword key. :type key: str :returns: The name of the keyword :rtype: str
[ "Given", "a", "keyword", "try", "to", "get", "the", "name", "of", "it", "." ]
python
train
24.541667
cjdrake/pyeda
pyeda/parsing/boolexpr.py
https://github.com/cjdrake/pyeda/blob/554ee53aa678f4b61bcd7e07ba2c74ddc749d665/pyeda/parsing/boolexpr.py#L622-L627
def _names(lexer): """Return a tuple of names.""" first = _expect_token(lexer, {NameToken}).value rest = _zom_name(lexer) rnames = (first, ) + rest return rnames[::-1]
[ "def", "_names", "(", "lexer", ")", ":", "first", "=", "_expect_token", "(", "lexer", ",", "{", "NameToken", "}", ")", ".", "value", "rest", "=", "_zom_name", "(", "lexer", ")", "rnames", "=", "(", "first", ",", ")", "+", "rest", "return", "rnames", ...
Return a tuple of names.
[ "Return", "a", "tuple", "of", "names", "." ]
python
train
30.333333
pyroscope/auvyon
src/auvyon/imaging/waveforms.py
https://github.com/pyroscope/auvyon/blob/5115c26f966df03df92a9934580b66c72e23d4e8/src/auvyon/imaging/waveforms.py#L23-L42
def waveform_image(mediafile, xy_size, outdir=None, center_color=None, outer_color=None, bg_color=None): """ Create waveform image from audio data. Return path to created image file. """ try: import waveform except ImportError, exc: raise ImportError("%s [get it at https://github.com/superjoe30/PyWaveform]" % exc) outdir = outdir or os.path.dirname(mediafile) outfile = os.path.join(outdir, os.path.splitext(os.path.basename(mediafile))[0] + ".png") with transcode.to_wav(mediafile) as wavfile: # Draw using a gradient waveform.draw(wavfile, outfile, xy_size, bgColor=bg_color or WAVE_BG_COLOR, fgGradientCenter=center_color or WAVE_CENTER_COLOR, fgGradientOuter=outer_color or WAVE_OUTER_COLOR) return outfile
[ "def", "waveform_image", "(", "mediafile", ",", "xy_size", ",", "outdir", "=", "None", ",", "center_color", "=", "None", ",", "outer_color", "=", "None", ",", "bg_color", "=", "None", ")", ":", "try", ":", "import", "waveform", "except", "ImportError", ","...
Create waveform image from audio data. Return path to created image file.
[ "Create", "waveform", "image", "from", "audio", "data", ".", "Return", "path", "to", "created", "image", "file", "." ]
python
train
40.3
nicolargo/glances
glances/processes.py
https://github.com/nicolargo/glances/blob/5bd4d587a736e0d2b03170b56926841d2a3eb7ee/glances/processes.py#L89-L102
def update_processcount(self, plist): """Update the global process count from the current processes list""" # Update the maximum process ID (pid) number self.processcount['pid_max'] = self.pid_max # For each key in the processcount dict # count the number of processes with the same status for k in iterkeys(self.processcount): self.processcount[k] = len(list(filter(lambda v: v['status'] is k, plist))) # Compute thread self.processcount['thread'] = sum(i['num_threads'] for i in plist if i['num_threads'] is not None) # Compute total self.processcount['total'] = len(plist)
[ "def", "update_processcount", "(", "self", ",", "plist", ")", ":", "# Update the maximum process ID (pid) number", "self", ".", "processcount", "[", "'pid_max'", "]", "=", "self", ".", "pid_max", "# For each key in the processcount dict", "# count the number of processes with...
Update the global process count from the current processes list
[ "Update", "the", "global", "process", "count", "from", "the", "current", "processes", "list" ]
python
train
53.357143
google/grr
grr/server/grr_response_server/aff4.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/aff4.py#L1843-L1867
def UpdateLease(self, duration): """Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired. """ if not self.locked: raise LockError("Object must be locked to update the lease: %s." % self.urn) if self.CheckLease() == 0: self._RaiseLockError("UpdateLease") self.transaction.UpdateLease(duration)
[ "def", "UpdateLease", "(", "self", ",", "duration", ")", ":", "if", "not", "self", ".", "locked", ":", "raise", "LockError", "(", "\"Object must be locked to update the lease: %s.\"", "%", "self", ".", "urn", ")", "if", "self", ".", "CheckLease", "(", ")", "...
Updates the lease and flushes the object. The lease is set to expire after the "duration" time from the present moment. This method is supposed to be used when operation that requires locking may run for a time that exceeds the lease time specified in OpenWithLock(). See flows/hunts locking for an example. Args: duration: Integer number of seconds. Lease expiry time will be set to "time.time() + duration". Raises: LockError: if the object is not currently locked or the lease has expired.
[ "Updates", "the", "lease", "and", "flushes", "the", "object", "." ]
python
train
33.24
tensorflow/probability
tensorflow_probability/python/glm/fisher_scoring.py
https://github.com/tensorflow/probability/blob/e87fe34111d68c35db0f9eeb4935f1ece9e1a8f5/tensorflow_probability/python/glm/fisher_scoring.py#L517-L620
def prepare_args(model_matrix, response, model_coefficients, predicted_linear_response, offset, name=None): """Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. 
offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise. """ graph_deps = [model_matrix, response, model_coefficients, predicted_linear_response, offset] with tf.compat.v1.name_scope(name, 'prepare_args', graph_deps): dtype = dtype_util.common_dtype(graph_deps, np.float32) model_matrix = tf.convert_to_tensor( value=model_matrix, dtype=dtype, name='model_matrix') if offset is not None: offset = tf.convert_to_tensor(value=offset, dtype=dtype, name='offset') response = tf.convert_to_tensor( value=response, dtype=dtype, name='response') use_default_model_coefficients = model_coefficients is None if use_default_model_coefficients: # User did not supply model coefficients; assume they're all zero. batch_shape = tf.shape(input=model_matrix)[:-2] num_columns = tf.shape(input=model_matrix)[-1] model_coefficients = tf.zeros( shape=tf.concat([batch_shape, [num_columns]], axis=0), dtype=dtype, name='model_coefficients') else: # User did supply model coefficients; convert to Tensor in case it's # numpy or literal. model_coefficients = tf.convert_to_tensor( value=model_coefficients, dtype=dtype, name='model_coefficients') if predicted_linear_response is None: if use_default_model_coefficients: # Since we're using zeros for model_coefficients, we know the predicted # linear response will also be all zeros. if offset is None: predicted_linear_response = tf.zeros_like( response, dtype, name='predicted_linear_response') else: predicted_linear_response = tf.broadcast_to( offset, tf.shape(input=response), name='predicted_linear_response') else: # We were given model_coefficients but not the predicted linear # response. 
predicted_linear_response = calculate_linear_predictor( model_matrix, model_coefficients, offset) else: predicted_linear_response = tf.convert_to_tensor( value=predicted_linear_response, dtype=dtype, name='predicted_linear_response') return [ model_matrix, response, model_coefficients, predicted_linear_response, offset, ]
[ "def", "prepare_args", "(", "model_matrix", ",", "response", ",", "model_coefficients", ",", "predicted_linear_response", ",", "offset", ",", "name", "=", "None", ")", ":", "graph_deps", "=", "[", "model_matrix", ",", "response", ",", "model_coefficients", ",", ...
Helper to `fit` which sanitizes input args. Args: model_matrix: (Batch of) `float`-like, matrix-shaped `Tensor` where each row represents a sample's features. response: (Batch of) vector-shaped `Tensor` where each element represents a sample's observed response (to the corresponding row of features). Must have same `dtype` as `model_matrix`. model_coefficients: Optional (batch of) vector-shaped `Tensor` representing the model coefficients, one for each column in `model_matrix`. Must have same `dtype` as `model_matrix`. Default value: `tf.zeros(tf.shape(model_matrix)[-1], model_matrix.dtype)`. predicted_linear_response: Optional `Tensor` with `shape`, `dtype` matching `response`; represents `offset` shifted initial linear predictions based on current `model_coefficients`. Default value: `offset` if `model_coefficients is None`, and `tf.linalg.matvec(model_matrix, model_coefficients_start) + offset` otherwise. offset: Optional `Tensor` with `shape`, `dtype` matching `response`; represents constant shift applied to `predicted_linear_response`. Default value: `None` (i.e., `tf.zeros_like(response)`). name: Python `str` used as name prefix to ops created by this function. Default value: `"prepare_args"`. Returns: model_matrix: A `Tensor` with `shape`, `dtype` and values of the `model_matrix` argument. response: A `Tensor` with `shape`, `dtype` and values of the `response` argument. model_coefficients_start: A `Tensor` with `shape`, `dtype` and values of the `model_coefficients_start` argument if specified. A (batch of) vector-shaped `Tensors` with `dtype` matching `model_matrix` containing the default starting point otherwise. predicted_linear_response: A `Tensor` with `shape`, `dtype` and values of the `predicted_linear_response` argument if specified. A `Tensor` with `shape`, `dtype` matching `response` containing the default value otherwise. offset: A `Tensor` with `shape`, `dtype` and values of the `offset` argument if specified or `None` otherwise.
[ "Helper", "to", "fit", "which", "sanitizes", "input", "args", "." ]
python
test
43.692308
bcbio/bcbio-nextgen
bcbio/pipeline/region.py
https://github.com/bcbio/bcbio-nextgen/blob/6a9348c0054ccd5baffd22f1bb7d0422f6978b20/bcbio/pipeline/region.py#L143-L164
def parallel_prep_region(samples, run_parallel): """Perform full pre-variant calling BAM prep work on regions. """ file_key = "work_bam" split_fn = _split_by_regions("bamprep", "-prep.bam", file_key) # identify samples that do not need preparation -- no recalibration or realignment extras = [] torun = [] for data in [x[0] for x in samples]: if data.get("work_bam"): data["align_bam"] = data["work_bam"] if (not dd.get_realign(data) and not dd.get_variantcaller(data)): extras.append([data]) elif not data.get(file_key): extras.append([data]) else: # Do not want to re-run duplicate marking after realignment data["config"]["algorithm"]["orig_markduplicates"] = dd.get_mark_duplicates(data) data = dd.set_mark_duplicates(data, False) torun.append([data]) return extras + parallel_split_combine(torun, split_fn, run_parallel, "piped_bamprep", _add_combine_info, file_key, ["config"])
[ "def", "parallel_prep_region", "(", "samples", ",", "run_parallel", ")", ":", "file_key", "=", "\"work_bam\"", "split_fn", "=", "_split_by_regions", "(", "\"bamprep\"", ",", "\"-prep.bam\"", ",", "file_key", ")", "# identify samples that do not need preparation -- no recali...
Perform full pre-variant calling BAM prep work on regions.
[ "Perform", "full", "pre", "-", "variant", "calling", "BAM", "prep", "work", "on", "regions", "." ]
python
train
48.045455
yamcs/yamcs-python
yamcs-client/examples/parameter_subscription.py
https://github.com/yamcs/yamcs-python/blob/1082fee8a299010cc44416bbb7518fac0ef08b48/yamcs-client/examples/parameter_subscription.py#L34-L61
def manage_subscription(): """Shows how to interact with a parameter subscription.""" subscription = processor.create_parameter_subscription([ '/YSS/SIMULATOR/BatteryVoltage1' ]) sleep(5) print('Adding extra items to the existing subscription...') subscription.add([ '/YSS/SIMULATOR/Alpha', '/YSS/SIMULATOR/BatteryVoltage2', 'MDB:OPS Name/SIMULATOR_PrimBusVoltage1', ]) sleep(5) print('Shrinking subscription...') subscription.remove('/YSS/SIMULATOR/Alpha') print('Cancelling the subscription...') subscription.cancel() print('Last values from cache:') print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage1')) print(subscription.get_value('/YSS/SIMULATOR/BatteryVoltage2')) print(subscription.get_value('/YSS/SIMULATOR/Alpha')) print(subscription.get_value('MDB:OPS Name/SIMULATOR_PrimBusVoltage1'))
[ "def", "manage_subscription", "(", ")", ":", "subscription", "=", "processor", ".", "create_parameter_subscription", "(", "[", "'/YSS/SIMULATOR/BatteryVoltage1'", "]", ")", "sleep", "(", "5", ")", "print", "(", "'Adding extra items to the existing subscription...'", ")", ...
Shows how to interact with a parameter subscription.
[ "Shows", "how", "to", "interact", "with", "a", "parameter", "subscription", "." ]
python
train
31.607143
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L3985-L4004
def kl_divergence(mu, log_var, mu_p=0.0, log_var_p=0.0): """KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss. """ batch_size = shape_list(mu)[0] prior_distribution = tfp.distributions.Normal( mu_p, tf.exp(tf.multiply(0.5, log_var_p))) posterior_distribution = tfp.distributions.Normal( mu, tf.exp(tf.multiply(0.5, log_var))) kld = tfp.distributions.kl_divergence(posterior_distribution, prior_distribution) return tf.reduce_sum(kld) / to_float(batch_size)
[ "def", "kl_divergence", "(", "mu", ",", "log_var", ",", "mu_p", "=", "0.0", ",", "log_var_p", "=", "0.0", ")", ":", "batch_size", "=", "shape_list", "(", "mu", ")", "[", "0", "]", "prior_distribution", "=", "tfp", ".", "distributions", ".", "Normal", "...
KL divergence of diagonal gaussian N(mu,exp(log_var)) and N(0,1). Args: mu: mu parameter of the distribution. log_var: log(var) parameter of the distribution. mu_p: optional mu from a learned prior distribution log_var_p: optional log(var) from a learned prior distribution Returns: the KL loss.
[ "KL", "divergence", "of", "diagonal", "gaussian", "N", "(", "mu", "exp", "(", "log_var", "))", "and", "N", "(", "0", "1", ")", "." ]
python
train
38.7
Microsoft/azure-devops-python-api
azure-devops/azure/devops/v5_0/core/core_client.py
https://github.com/Microsoft/azure-devops-python-api/blob/4777ffda2f5052fabbaddb2abe9cb434e0cf1aa8/azure-devops/azure/devops/v5_0/core/core_client.py#L64-L82
def get_connected_services(self, project_id, kind=None): """GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService] """ route_values = {} if project_id is not None: route_values['projectId'] = self._serialize.url('project_id', project_id, 'str') query_parameters = {} if kind is not None: query_parameters['kind'] = self._serialize.query('kind', kind, 'str') response = self._send(http_method='GET', location_id='b4f70219-e18b-42c5-abe3-98b07d35525e', version='5.0-preview.1', route_values=route_values, query_parameters=query_parameters) return self._deserialize('[WebApiConnectedService]', self._unwrap_collection(response))
[ "def", "get_connected_services", "(", "self", ",", "project_id", ",", "kind", "=", "None", ")", ":", "route_values", "=", "{", "}", "if", "project_id", "is", "not", "None", ":", "route_values", "[", "'projectId'", "]", "=", "self", ".", "_serialize", ".", ...
GetConnectedServices. [Preview API] :param str project_id: :param str kind: :rtype: [WebApiConnectedService]
[ "GetConnectedServices", ".", "[", "Preview", "API", "]", ":", "param", "str", "project_id", ":", ":", "param", "str", "kind", ":", ":", "rtype", ":", "[", "WebApiConnectedService", "]" ]
python
train
47.421053
timeyyy/apptools
peasoup/peasoup.py
https://github.com/timeyyy/apptools/blob/d3c0f324b0c2689c35f5601348276f4efd6cb240/peasoup/peasoup.py#L106-L142
def create_cfg(self, cfg_file, defaults=None, mode='json'): ''' set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save() ''' assert mode in ('json', 'yaml') self.cfg_mode = mode self.cfg_file = cfg_file try: self.cfg = CfgDict(app=self, cfg=self.load_cfg()) logging.info('cfg file found : %s' % self.cfg_file) except FileNotFoundError: self.cfg = CfgDict(app=self, cfg={'first_run': True}) with suppress(TypeError): self.cfg.update(defaults) self.cfg.save() set_windows_permissions(self.cfg_file) logging.info( 'Created cfg file for first time!: %s' % self.cfg_file) if self._check_first_run(): self.first_run = True else: self.first_run = False
[ "def", "create_cfg", "(", "self", ",", "cfg_file", ",", "defaults", "=", "None", ",", "mode", "=", "'json'", ")", ":", "assert", "mode", "in", "(", "'json'", ",", "'yaml'", ")", "self", ".", "cfg_mode", "=", "mode", "self", ".", "cfg_file", "=", "cfg...
set mode to json or yaml? probably remove this option..Todo Creates the config file for your app with default values The file will only be created if it doesn't exits also sets up the first_run attribute. also sets correct windows permissions you can add custom stuff to the config by doing app.cfg['fkdsfa'] = 'fdsaf' # todo auto save on change remember to call cfg.save()
[ "set", "mode", "to", "json", "or", "yaml?", "probably", "remove", "this", "option", "..", "Todo" ]
python
train
33.297297
aio-libs/aiohttp
aiohttp/web_request.py
https://github.com/aio-libs/aiohttp/blob/9504fe2affaaff673fa4f3754c1c44221f8ba47d/aiohttp/web_request.py#L476-L484
def cookies(self) -> Mapping[str, str]: """Return request cookies. A read-only dictionary-like object. """ raw = self.headers.get(hdrs.COOKIE, '') parsed = SimpleCookie(raw) return MappingProxyType( {key: val.value for key, val in parsed.items()})
[ "def", "cookies", "(", "self", ")", "->", "Mapping", "[", "str", ",", "str", "]", ":", "raw", "=", "self", ".", "headers", ".", "get", "(", "hdrs", ".", "COOKIE", ",", "''", ")", "parsed", "=", "SimpleCookie", "(", "raw", ")", "return", "MappingPro...
Return request cookies. A read-only dictionary-like object.
[ "Return", "request", "cookies", "." ]
python
train
33.333333
facebook/watchman
python/pywatchman_aio/__init__.py
https://github.com/facebook/watchman/blob/d416c249dd8f463dc69fc2691d0f890598c045a9/python/pywatchman_aio/__init__.py#L240-L246
async def receive_bilateral_response(self): """Receive the response to a request made to the Watchman service.""" self._check_receive_loop() resp = await self.bilateral_response_queue.get() self._check_error(resp) return resp
[ "async", "def", "receive_bilateral_response", "(", "self", ")", ":", "self", ".", "_check_receive_loop", "(", ")", "resp", "=", "await", "self", ".", "bilateral_response_queue", ".", "get", "(", ")", "self", ".", "_check_error", "(", "resp", ")", "return", "...
Receive the response to a request made to the Watchman service.
[ "Receive", "the", "response", "to", "a", "request", "made", "to", "the", "Watchman", "service", "." ]
python
train
37.142857
shmuelamar/cbox
cbox/__main__.py
https://github.com/shmuelamar/cbox/blob/2d0cda5b3f61a55e530251430bf3d460dcd3732e/cbox/__main__.py#L77-L95
def get_inline_func(inline_str, modules=None, **stream_kwargs): """returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable """ if not _is_compilable(inline_str): raise ValueError( 'cannot compile the inline expression - "%s"' % inline_str ) inline_globals = _import_inline_modules(modules) func = _inline2func(inline_str, inline_globals, **stream_kwargs) return func
[ "def", "get_inline_func", "(", "inline_str", ",", "modules", "=", "None", ",", "*", "*", "stream_kwargs", ")", ":", "if", "not", "_is_compilable", "(", "inline_str", ")", ":", "raise", "ValueError", "(", "'cannot compile the inline expression - \"%s\"'", "%", "inl...
returns a function decorated by `cbox.stream` decorator. :param str inline_str: the inline function to execute, can use `s` - local variable as the input line/char/raw (according to `input_type` param). :param str modules: comma separated list of modules to import before running the inline function. :param dict stream_kwargs: optional arguments to `cbox.stream` decorator :rtype: callable
[ "returns", "a", "function", "decorated", "by", "cbox", ".", "stream", "decorator", "." ]
python
train
40.578947
bfrog/whizzer
whizzer/defer.py
https://github.com/bfrog/whizzer/blob/a1e43084b3ac8c1f3fb4ada081777cdbf791fd77/whizzer/defer.py#L172-L175
def add_callback(self, callback, *callback_args, **callback_kwargs): """Add a callback without an associated errback.""" return self.add_callbacks(callback, callback_args=callback_args, callback_kwargs=callback_kwargs)
[ "def", "add_callback", "(", "self", ",", "callback", ",", "*", "callback_args", ",", "*", "*", "callback_kwargs", ")", ":", "return", "self", ".", "add_callbacks", "(", "callback", ",", "callback_args", "=", "callback_args", ",", "callback_kwargs", "=", "callb...
Add a callback without an associated errback.
[ "Add", "a", "callback", "without", "an", "associated", "errback", "." ]
python
train
64.5
timothydmorton/isochrones
isochrones/starmodel_old.py
https://github.com/timothydmorton/isochrones/blob/d84495573044c66db2fd6b959fe69e370757ea14/isochrones/starmodel_old.py#L1095-L1140
def load_hdf(cls, filename, path='', name=None): """ A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object. """ store = pd.HDFStore(filename) try: samples = store['{}/samples'.format(path)] attrs = store.get_storer('{}/samples'.format(path)).attrs except: store.close() raise properties = attrs.properties maxAV = attrs.maxAV max_distance = attrs.max_distance min_logg = attrs.min_logg ic_type = attrs.ic_type use_emcee = attrs.use_emcee basename = attrs._mnest_basename if name is None: try: name = attrs.name except: name = '' store.close() #ic = ic_type() don't need to initialize anymore mod = cls(ic_type, maxAV=maxAV, max_distance=max_distance, use_emcee=use_emcee, name=name, **properties) mod._samples = samples mod._mnest_basename = basename return mod
[ "def", "load_hdf", "(", "cls", ",", "filename", ",", "path", "=", "''", ",", "name", "=", "None", ")", ":", "store", "=", "pd", ".", "HDFStore", "(", "filename", ")", "try", ":", "samples", "=", "store", "[", "'{}/samples'", ".", "format", "(", "pa...
A class method to load a saved StarModel from an HDF5 file. File must have been created by a call to :func:`StarModel.save_hdf`. :param filename: H5 file to load. :param path: (optional) Path within HDF file. :return: :class:`StarModel` object.
[ "A", "class", "method", "to", "load", "a", "saved", "StarModel", "from", "an", "HDF5", "file", "." ]
python
train
28.217391
hanguokai/youku
youku/youku_playlists.py
https://github.com/hanguokai/youku/blob/b2df060c7dccfad990bcfa289fff68bb77d1e69b/youku/youku_playlists.py#L33-L43
def find_playlists_by_ids(self, playlist_ids): """doc: http://open.youku.com/docs/doc?id=67 """ url = 'https://openapi.youku.com/v2/playlists/show_batch.json' params = { 'client_id': self.client_id, 'playlist_ids': playlist_ids } r = requests.get(url, params=params) check_error(r) return r.json()
[ "def", "find_playlists_by_ids", "(", "self", ",", "playlist_ids", ")", ":", "url", "=", "'https://openapi.youku.com/v2/playlists/show_batch.json'", "params", "=", "{", "'client_id'", ":", "self", ".", "client_id", ",", "'playlist_ids'", ":", "playlist_ids", "}", "r", ...
doc: http://open.youku.com/docs/doc?id=67
[ "doc", ":", "http", ":", "//", "open", ".", "youku", ".", "com", "/", "docs", "/", "doc?id", "=", "67" ]
python
train
34.090909