repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
thebjorn/pydeps
pydeps/cli.py
https://github.com/thebjorn/pydeps/blob/1e6715b7bea47a40e8042821b57937deaaa0fdc3/pydeps/cli.py#L45-L78
def base_argparser(argv=()):
    """Initial parser that can set values for the rest of the parsing process.

    Parses only the bootstrap options (``--debug``, ``--config``,
    ``--no-config``, ``--version`` and ``-L/--log``) and returns the
    parser, the parsed namespace, and the remaining argv so later
    parsers can consume the rest.
    """
    global verbose
    verbose = _not_verbose
    _p = argparse.ArgumentParser(add_help=False)
    _p.add_argument('--debug', action='store_true',
                    help="turn on all the show and verbose options (mainly for debugging pydeps itself)")
    _p.add_argument('--config', help="specify config file", metavar="FILE")
    _p.add_argument('--no-config', help="disable processing of config files",
                    action='store_true')
    _p.add_argument('--version', action='store_true', help='print pydeps version')
    _p.add_argument('-L', '--log', help=textwrap.dedent('''
        set log-level to one of CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET.
    '''))
    _args, argv = _p.parse_known_args(argv)

    if _args.log:
        # BUG FIX: the original tested ``_args.log not in "CRITICAL DEBUG
        # ERROR FATAL INFO WARN"`` -- a *substring* test, so bogus values
        # like "R" or "AL INFO" were accepted (then crashed in getattr),
        # while the documented "WARNING" and "NOTSET" were rejected.
        # Compare against a list of real level names instead.
        loglevels = "CRITICAL DEBUG ERROR FATAL INFO NOTSET WARN WARNING".split()
        if _args.log not in loglevels:  # pragma: nocover
            error('legal values for the -L parameter are:', ' '.join(loglevels))
        loglevel = getattr(logging, _args.log)
    else:
        loglevel = None

    logging.basicConfig(
        level=loglevel,
        format='%(filename)s:%(lineno)d: %(levelname)s: %(message)s'
    )

    if _args.version:  # pragma: nocover
        print("pydeps v" + __version__)
        sys.exit(0)

    return _p, _args, argv
[ "def", "base_argparser", "(", "argv", "=", "(", ")", ")", ":", "global", "verbose", "verbose", "=", "_not_verbose", "_p", "=", "argparse", ".", "ArgumentParser", "(", "add_help", "=", "False", ")", "_p", ".", "add_argument", "(", "'--debug'", ",", "action"...
Initial parser that can set values for the rest of the parsing process.
[ "Initial", "parser", "that", "can", "set", "values", "for", "the", "rest", "of", "the", "parsing", "process", "." ]
python
train
saltstack/salt
salt/modules/mac_shadow.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/mac_shadow.py#L238-L257
def get_login_failed_count(name):
    '''
    Get the number of failed login attempts

    :param str name: The username of the account

    :return: The number of failed login attempts
    :rtype: int

    :raises: CommandExecutionError on user not found or any other unknown error

    CLI Example:

    .. code-block:: bash

        salt '*' shadow.get_login_failed_count admin
    '''
    policy_value = _get_account_policy_data_value(name, 'failedLoginCount')
    return salt.utils.mac_utils.parse_return(policy_value)
[ "def", "get_login_failed_count", "(", "name", ")", ":", "ret", "=", "_get_account_policy_data_value", "(", "name", ",", "'failedLoginCount'", ")", "return", "salt", ".", "utils", ".", "mac_utils", ".", "parse_return", "(", "ret", ")" ]
Get the the number of failed login attempts :param str name: The username of the account :return: The number of failed login attempts :rtype: int :raises: CommandExecutionError on user not found or any other unknown error CLI Example: .. code-block:: bash salt '*' shadow.get_login_failed_count admin
[ "Get", "the", "the", "number", "of", "failed", "login", "attempts" ]
python
train
llllllllll/codetransformer
codetransformer/code.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/code.py#L429-L510
def from_pycode(cls, co): """Create a Code object from a python code object. Parameters ---------- co : CodeType The python code object. Returns ------- code : Code The codetransformer Code object. """ # Make it sparse to instrs[n] is the instruction at bytecode[n] sparse_instrs = tuple( _sparse_args( Instruction.from_opcode( b.opcode, Instruction._no_arg if b.arg is None else _RawArg(b.arg), ) for b in Bytecode(co) ), ) for idx, instr in enumerate(sparse_instrs): if instr is None: # The sparse value continue if instr.absjmp: instr.arg = sparse_instrs[instr.arg] elif instr.reljmp: instr.arg = sparse_instrs[instr.arg + idx + argsize + 1] elif isinstance(instr, LOAD_CONST): instr.arg = co.co_consts[instr.arg] elif instr.uses_name: instr.arg = co.co_names[instr.arg] elif instr.uses_varname: instr.arg = co.co_varnames[instr.arg] elif instr.uses_free: instr.arg = _freevar_argname( instr.arg, co.co_freevars, co.co_cellvars, ) elif instr.have_arg and isinstance(instr.arg, _RawArg): instr.arg = int(instr.arg) flags = Flag.unpack(co.co_flags) has_vargs = flags['CO_VARARGS'] has_kwargs = flags['CO_VARKEYWORDS'] # Here we convert the varnames format into our argnames format. paramnames = co.co_varnames[ :(co.co_argcount + co.co_kwonlyargcount + has_vargs + has_kwargs) ] # We start with the positional arguments. new_paramnames = list(paramnames[:co.co_argcount]) # Add *args next. if has_vargs: new_paramnames.append('*' + paramnames[-1 - has_kwargs]) # Add positional only arguments next. new_paramnames.extend(paramnames[ co.co_argcount:co.co_argcount + co.co_kwonlyargcount ]) # Add **kwargs last. if has_kwargs: new_paramnames.append('**' + paramnames[-1]) return cls( filter(bool, sparse_instrs), argnames=new_paramnames, cellvars=co.co_cellvars, freevars=co.co_freevars, name=co.co_name, filename=co.co_filename, firstlineno=co.co_firstlineno, lnotab={ lno: sparse_instrs[off] for off, lno in findlinestarts(co) }, flags=flags, )
[ "def", "from_pycode", "(", "cls", ",", "co", ")", ":", "# Make it sparse to instrs[n] is the instruction at bytecode[n]", "sparse_instrs", "=", "tuple", "(", "_sparse_args", "(", "Instruction", ".", "from_opcode", "(", "b", ".", "opcode", ",", "Instruction", ".", "_...
Create a Code object from a python code object. Parameters ---------- co : CodeType The python code object. Returns ------- code : Code The codetransformer Code object.
[ "Create", "a", "Code", "object", "from", "a", "python", "code", "object", "." ]
python
train
offu/WeRoBot
werobot/crypto/__init__.py
https://github.com/offu/WeRoBot/blob/fd42109105b03f9acf45ebd9dcabb9d5cff98f3c/werobot/crypto/__init__.py#L123-L144
def encrypt_message(self, reply, timestamp=None, nonce=None):
    """Encrypt a WeChat reply.

    :param reply: the reply to encrypt; either a WeChatReply (anything
        with a ``render()`` method) or raw XML text
    :param timestamp: optional timestamp; defaults to the current time
    :param nonce: optional nonce; defaults to a freshly generated token
    :return: the encrypted reply XML text
    """
    if hasattr(reply, "render"):
        reply = reply.render()

    ts = timestamp or to_text(int(time.time()))
    n = nonce or generate_token(5)

    ciphertext = to_text(self.prp_crypto.encrypt(reply, self.app_id))
    sig = get_signature(self.token, ts, n, ciphertext)

    xml = self.ENCRYPTED_MESSAGE_XML.format(
        encrypt=ciphertext,
        signature=sig,
        timestamp=ts,
        nonce=n,
    )
    return to_text(xml)
[ "def", "encrypt_message", "(", "self", ",", "reply", ",", "timestamp", "=", "None", ",", "nonce", "=", "None", ")", ":", "if", "hasattr", "(", "reply", ",", "\"render\"", ")", ":", "reply", "=", "reply", ".", "render", "(", ")", "timestamp", "=", "ti...
加密微信回复 :param reply: 加密前的回复 :type reply: WeChatReply 或 XML 文本 :return: 加密后的回复文本
[ "加密微信回复", ":", "param", "reply", ":", "加密前的回复", ":", "type", "reply", ":", "WeChatReply", "或", "XML", "文本", ":", "return", ":", "加密后的回复文本" ]
python
train
bitesofcode/projexui
projexui/widgets/xtabwidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtabwidget.py#L187-L195
def resizeEvent(self, event):
    """
    Updates the position of the additional buttons when this widget \
    resizes.

    :param      event | <QResizeEvent>
    """
    # Let Qt apply the resize first, then move the extra tab-bar
    # buttons to match the new geometry.
    super(XTabWidget, self).resizeEvent(event)
    self.adjustButtons()
[ "def", "resizeEvent", "(", "self", ",", "event", ")", ":", "super", "(", "XTabWidget", ",", "self", ")", ".", "resizeEvent", "(", "event", ")", "self", ".", "adjustButtons", "(", ")" ]
Updates the position of the additional buttons when this widget \ resizes. :param event | <QResizeEvet>
[ "Updates", "the", "position", "of", "the", "additional", "buttons", "when", "this", "widget", "\\", "resizes", ".", ":", "param", "event", "|", "<QResizeEvet", ">" ]
python
train
hfaran/piazza-api
piazza_api/rpc.py
https://github.com/hfaran/piazza-api/blob/26201d06e26bada9a838f6765c1bccedad05bd39/piazza_api/rpc.py#L345-L362
def search(self, query, nid=None):
    """Search for posts with ``query``

    :type nid: str
    :param nid: This is the ID of the network to get the feed
        from. This is optional and only to override the existing
        `network_id` entered when created the class
    :type query: str
    :param query: The search query; should just be keywords for posts
        that you are looking for
    """
    response = self.request(
        method="network.search",
        nid=nid,
        data=dict(query=query),
    )
    failure_msg = "Search with query '{}' failed.".format(query)
    return self._handle_error(response, failure_msg)
[ "def", "search", "(", "self", ",", "query", ",", "nid", "=", "None", ")", ":", "r", "=", "self", ".", "request", "(", "method", "=", "\"network.search\"", ",", "nid", "=", "nid", ",", "data", "=", "dict", "(", "query", "=", "query", ")", ")", "re...
Search for posts with ``query`` :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class :type query: str :param query: The search query; should just be keywords for posts that you are looking for
[ "Search", "for", "posts", "with", "query" ]
python
train
Kozea/cairocffi
cairocffi/fonts.py
https://github.com/Kozea/cairocffi/blob/450853add7e32eea20985b6aa5f54d9cb3cd04fe/cairocffi/fonts.py#L198-L211
def get_ctm(self):
    """Copies the scaled font’s font current transform matrix.

    Note that the translation offsets ``(x0, y0)`` of the CTM are
    ignored by :class:`ScaledFont`, so the matrix this method returns
    always has ``0`` as ``x0`` and ``y0``.

    :returns: A new :class:`Matrix` object.
    """
    result = Matrix()
    # cairo fills the matrix struct in place through its pointer.
    cairo.cairo_scaled_font_get_ctm(self._pointer, result._pointer)
    self._check_status()
    return result
[ "def", "get_ctm", "(", "self", ")", ":", "matrix", "=", "Matrix", "(", ")", "cairo", ".", "cairo_scaled_font_get_ctm", "(", "self", ".", "_pointer", ",", "matrix", ".", "_pointer", ")", "self", ".", "_check_status", "(", ")", "return", "matrix" ]
Copies the scaled font’s font current transform matrix. Note that the translation offsets ``(x0, y0)`` of the CTM are ignored by :class:`ScaledFont`. So, the matrix this method returns always has 0 as ``x0`` and ``y0``. :returns: A new :class:`Matrix` object.
[ "Copies", "the", "scaled", "font’s", "font", "current", "transform", "matrix", "." ]
python
train
saltstack/salt
salt/modules/postgres.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/postgres.py#L1335-L1359
def _role_remove(name, user=None, host=None, port=None, maintenance_db=None,
                 password=None, runas=None):
    '''
    Removes a role from the Postgres Server
    '''
    # Nothing to do if the role is not there.
    if not user_exists(name, user, host, port, maintenance_db,
                       password=password, runas=runas):
        log.info('User \'%s\' does not exist', name)
        return False

    _psql_prepare_and_run(
        ['-c', 'DROP ROLE "{0}"'.format(name)],
        runas=runas, host=host, user=user, port=port,
        maintenance_db=maintenance_db, password=password)

    # Verify the role is really gone.
    if user_exists(name, user, host, port, maintenance_db,
                   password=password, runas=runas):
        log.info('Failed to delete user \'%s\'.', name)
        return False
    return True
[ "def", "_role_remove", "(", "name", ",", "user", "=", "None", ",", "host", "=", "None", ",", "port", "=", "None", ",", "maintenance_db", "=", "None", ",", "password", "=", "None", ",", "runas", "=", "None", ")", ":", "# check if user exists", "if", "no...
Removes a role from the Postgres Server
[ "Removes", "a", "role", "from", "the", "Postgres", "Server" ]
python
train
blue-yonder/tsfresh
tsfresh/scripts/run_tsfresh.py
https://github.com/blue-yonder/tsfresh/blob/c72c9c574371cf7dd7d54e00a466792792e5d202/tsfresh/scripts/run_tsfresh.py#L30-L42
def _preprocess(df): """ given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise. """ df = df.stack() df.index.rename(["id", "time"], inplace=True) # .reset_index() df.name = "value" df = df.reset_index() return df
[ "def", "_preprocess", "(", "df", ")", ":", "df", "=", "df", ".", "stack", "(", ")", "df", ".", "index", ".", "rename", "(", "[", "\"id\"", ",", "\"time\"", "]", ",", "inplace", "=", "True", ")", "# .reset_index()", "df", ".", "name", "=", "\"value\...
given a DataFrame where records are stored row-wise, rearrange it such that records are stored column-wise.
[ "given", "a", "DataFrame", "where", "records", "are", "stored", "row", "-", "wise", "rearrange", "it", "such", "that", "records", "are", "stored", "column", "-", "wise", "." ]
python
train
tanghaibao/jcvi
jcvi/apps/cdhit.py
https://github.com/tanghaibao/jcvi/blob/d2e31a77b6ade7f41f3b321febc2b4744d1cdeca/jcvi/apps/cdhit.py#L188-L242
def deduplicate(args):
    """
    %prog deduplicate fastafile

    Wraps `cd-hit-est` to remove duplicate sequences.
    """
    p = OptionParser(deduplicate.__doc__)
    p.set_align(pctid=96, pctcov=0)
    p.add_option("--fast", default=False, action="store_true",
                 help="Place sequence in the first cluster")
    p.add_option("--consensus", default=False, action="store_true",
                 help="Compute consensus sequences")
    p.add_option("--reads", default=False, action="store_true",
                 help="Use `cd-hit-454` to deduplicate [default: %default]")
    p.add_option("--samestrand", default=False, action="store_true",
                 help="Enforce same strand alignment")
    p.set_home("cdhit")
    p.set_cpus()
    opts, args = p.parse_args(args)
    if len(args) != 1:
        sys.exit(not p.print_help())

    fastafile, = args
    # cd-hit takes identity as a fraction, the CLI takes a percentage.
    identity = opts.pctid / 100.
    fastafile, qualfile = fasta([fastafile, "--seqtk"])

    # Pick the binary: cd-hit-454 for raw reads, cd-hit-est otherwise.
    ocmd = "cd-hit-454" if opts.reads else "cd-hit-est"
    cmd = op.join(opts.cdhit_home, ocmd)
    cmd += " -c {0}".format(identity)
    if ocmd == "cd-hit-est":
        cmd += " -d 0"  # include complete defline
        # -r 0 (same-strand only) is a cd-hit-est option.
        if opts.samestrand:
            cmd += " -r 0"
    if not opts.fast:
        cmd += " -g 1"  # slower but more accurate clustering mode
    if opts.pctcov != 0:
        cmd += " -aL {0} -aS {0}".format(opts.pctcov / 100.)

    dd = fastafile + ".P{0}.cdhit".format(opts.pctid)
    clstr = dd + ".clstr"

    cmd += " -M 0 -T {0} -i {1} -o {2}".format(opts.cpus, fastafile, dd)
    # Only rerun cd-hit when the inputs are newer than its outputs.
    if need_update(fastafile, (dd, clstr)):
        sh(cmd)

    if opts.consensus:
        # Optional second pass: build consensus sequences per cluster.
        cons = dd + ".consensus"
        cmd = op.join(opts.cdhit_home, "cdhit-cluster-consensus")
        cmd += " clustfile={0} fastafile={1} output={2} maxlen=1".\
            format(clstr, fastafile, cons)
        if need_update((clstr, fastafile), cons):
            sh(cmd)

    return dd
[ "def", "deduplicate", "(", "args", ")", ":", "p", "=", "OptionParser", "(", "deduplicate", ".", "__doc__", ")", "p", ".", "set_align", "(", "pctid", "=", "96", ",", "pctcov", "=", "0", ")", "p", ".", "add_option", "(", "\"--fast\"", ",", "default", "...
%prog deduplicate fastafile Wraps `cd-hit-est` to remove duplicate sequences.
[ "%prog", "deduplicate", "fastafile" ]
python
train
jmgilman/Neolib
neolib/pyamf/codec.py
https://github.com/jmgilman/Neolib/blob/228fafeaed0f3195676137732384a14820ae285c/neolib/pyamf/codec.py#L480-L499
def writeElement(self, data):
    """
    Encodes C{data} to AMF. If the data is not able to be matched to an AMF
    type, then L{pyamf.EncodeError} will be raised.
    """
    key = type(data)

    try:
        func = self._func_cache[key]
    except KeyError:
        # Cache miss: resolve the encoder for this type once, then
        # remember it for subsequent values of the same type.
        func = self.getTypeFunc(data)

        if func is None:
            raise pyamf.EncodeError('Unable to encode %r (type %r)' % (
                data, key))

        self._func_cache[key] = func

    func(data)
[ "def", "writeElement", "(", "self", ",", "data", ")", ":", "key", "=", "type", "(", "data", ")", "func", "=", "None", "try", ":", "func", "=", "self", ".", "_func_cache", "[", "key", "]", "except", "KeyError", ":", "func", "=", "self", ".", "getTyp...
Encodes C{data} to AMF. If the data is not able to be matched to an AMF type, then L{pyamf.EncodeError} will be raised.
[ "Encodes", "C", "{", "data", "}", "to", "AMF", ".", "If", "the", "data", "is", "not", "able", "to", "be", "matched", "to", "an", "AMF", "type", "then", "L", "{", "pyamf", ".", "EncodeError", "}", "will", "be", "raised", "." ]
python
train
havardgulldahl/jottalib
src/jottalib/JFS.py
https://github.com/havardgulldahl/jottalib/blob/4d015e4309b1d9055e561ec757363fb2632b4eb7/src/jottalib/JFS.py#L822-L833
def files(self, mountPoint):
    """Get an iterator of JFSFile() from the given mountPoint.

    "mountPoint" may be either an actual mountPoint element from
    JFSDevice.mountPoints{} or its .name.
    """
    if isinstance(mountPoint, six.string_types):
        # shortcut: pass a mountpoint name
        mountPoint = self.mountPoints[mountPoint]
    try:
        children = self.contents(mountPoint).files.iterchildren()
        return [JFSFile(f, self, parentpath='%s/%s' % (self.path, mountPoint.name))
                for f in children]
    except AttributeError:
        # No files at all.
        return []
[ "def", "files", "(", "self", ",", "mountPoint", ")", ":", "if", "isinstance", "(", "mountPoint", ",", "six", ".", "string_types", ")", ":", "# shortcut: pass a mountpoint name", "mountPoint", "=", "self", ".", "mountPoints", "[", "mountPoint", "]", "try", ":",...
Get an iterator of JFSFile() from the given mountPoint. "mountPoint" may be either an actual mountPoint element from JFSDevice.mountPoints{} or its .name.
[ "Get", "an", "iterator", "of", "JFSFile", "()", "from", "the", "given", "mountPoint", "." ]
python
train
UDST/urbansim
urbansim/models/dcm.py
https://github.com/UDST/urbansim/blob/79f815a6503e109f50be270cee92d0f4a34f49ef/urbansim/models/dcm.py#L1158-L1195
def predict(self, choosers, alternatives, debug=False):
    """
    Choose from among alternatives for a group of agents after
    segmenting the `choosers` table.

    Parameters
    ----------
    choosers : pandas.DataFrame
        Table describing the agents making choices, e.g. households.
        Must have a column matching the .segmentation_col attribute.
    alternatives : pandas.DataFrame
        Table describing the things from which agents are choosing.
    debug : bool
        If debug is set to true, will set the variable "sim_pdf" on
        the object to store the probabilities for mapping of the
        outcome.

    Returns
    -------
    choices : pandas.Series
        Mapping of chooser ID to alternative ID. Some choosers
        will map to a nan value when there are not enough alternatives
        for all the choosers.

    """
    logger.debug('start: predict models in LCM group {}'.format(self.name))
    all_choices = []

    for name, segment in self._iter_groups(choosers):
        choices = self.models[name].predict(segment, alternatives, debug=debug)

        if self.remove_alts and len(alternatives) > 0:
            # Drop alternatives just taken so later segments can't
            # choose them again.
            taken = alternatives.index.isin(choices)
            alternatives = alternatives.loc[~taken]

        all_choices.append(choices)

    logger.debug('finish: predict models in LCM group {}'.format(self.name))

    if not all_choices:
        return pd.Series()
    return pd.concat(all_choices)
[ "def", "predict", "(", "self", ",", "choosers", ",", "alternatives", ",", "debug", "=", "False", ")", ":", "logger", ".", "debug", "(", "'start: predict models in LCM group {}'", ".", "format", "(", "self", ".", "name", ")", ")", "results", "=", "[", "]", ...
Choose from among alternatives for a group of agents after segmenting the `choosers` table. Parameters ---------- choosers : pandas.DataFrame Table describing the agents making choices, e.g. households. Must have a column matching the .segmentation_col attribute. alternatives : pandas.DataFrame Table describing the things from which agents are choosing. debug : bool If debug is set to true, will set the variable "sim_pdf" on the object to store the probabilities for mapping of the outcome. Returns ------- choices : pandas.Series Mapping of chooser ID to alternative ID. Some choosers will map to a nan value when there are not enough alternatives for all the choosers.
[ "Choose", "from", "among", "alternatives", "for", "a", "group", "of", "agents", "after", "segmenting", "the", "choosers", "table", "." ]
python
train
clalancette/pycdlib
pycdlib/pycdlib.py
https://github.com/clalancette/pycdlib/blob/1e7b77a809e905d67dc71e12d70e850be26b6233/pycdlib/pycdlib.py#L677-L731
def seek(self, offset, whence=0):
    # type: (int, int) -> int
    '''
    A method to change the stream position to byte offset offset.  The
    offset is interpreted relative to the position indicated by whence.
    Valid values for whence are:

    * 0 -- start of stream (the default); offset should be zero or positive
    * 1 -- current stream position; offset may be negative
    * 2 -- end of stream; offset is usually negative

    Parameters:
     offset - The byte offset to seek to.
     whence - The position in the file to start from (0 for start, 1 for
              current, 2 for end)
    Returns:
     The new absolute position.
    '''
    if not self._open:
        raise pycdlibexception.PyCdlibInvalidInput('I/O operation on closed file.')

    if isinstance(offset, float):
        raise pycdlibexception.PyCdlibInvalidInput('an integer is required')

    if whence == 0:
        # From beginning of file
        if offset < 0:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (must be positive)')
        # Only move the underlying fp while inside the stream's region;
        # the logical offset may point past the end.
        if offset < self._length:
            self._fp.seek(self._startpos + offset, 0)
        self._offset = offset
    elif whence == 1:
        # From current file position
        if self._offset + offset < 0:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (cannot seek before start of file)')
        if self._offset + offset < self._length:
            self._fp.seek(self._startpos + self._offset + offset, 0)
        self._offset += offset
    elif whence == 2:
        # From end of file
        if offset < 0 and abs(offset) > self._length:
            raise pycdlibexception.PyCdlibInvalidInput('Invalid offset value (cannot seek before start of file)')
        if self._length + offset < self._length:
            # BUG FIX: the underlying fp position must include
            # self._startpos, as in the whence == 0 and whence == 1
            # branches; the original seeked relative to the start of the
            # containing file instead of this stream's region.
            self._fp.seek(self._startpos + self._length + offset, 0)
        self._offset = self._length + offset
    else:
        raise pycdlibexception.PyCdlibInvalidInput('Invalid value for whence (options are 0, 1, and 2)')

    return self._offset
[ "def", "seek", "(", "self", ",", "offset", ",", "whence", "=", "0", ")", ":", "# type: (int, int) -> int", "if", "not", "self", ".", "_open", ":", "raise", "pycdlibexception", ".", "PyCdlibInvalidInput", "(", "'I/O operation on closed file.'", ")", "if", "isinst...
A method to change the stream position to byte offset offset. The offset is interpreted relative to the position indicated by whence. Valid values for whence are: * 0 -- start of stream (the default); offset should be zero or positive * 1 -- current stream position; offset may be negative * 2 -- end of stream; offset is usually negative Parameters: offset - The byte offset to seek to. whence - The position in the file to start from (0 for start, 1 for current, 2 for end) Returns: The new absolute position.
[ "A", "method", "to", "change", "the", "stream", "position", "to", "byte", "offset", "offset", ".", "The", "offset", "is", "interpreted", "relative", "to", "the", "position", "indicated", "by", "whence", ".", "Valid", "values", "for", "whence", "are", ":" ]
python
train
connectordb/connectordb-python
connectordb/_datapointarray.py
https://github.com/connectordb/connectordb-python/blob/2092b0cb30898139a247176bcf433d5a4abde7cb/connectordb/_datapointarray.py#L95-L107
def loadExport(self, folder):
    """Adds the data from a ConnectorDB export.

    If it is a stream export, then the folder is the location of the
    export. If it is a device export, then the folder is the export
    folder with the stream name as a subdirectory. If it is a user
    export, you will use the path of the export folder, with the
    user/device/stream appended to the end::

        myuser.export("./exportdir")
        DatapointArray().loadExport("./exportdir/username/devicename/streamname")
    """
    datafile = os.path.join(folder, "data.json")
    self.loadJSON(datafile)
    return self
[ "def", "loadExport", "(", "self", ",", "folder", ")", ":", "self", ".", "loadJSON", "(", "os", ".", "path", ".", "join", "(", "folder", ",", "\"data.json\"", ")", ")", "return", "self" ]
Adds the data from a ConnectorDB export. If it is a stream export, then the folder is the location of the export. If it is a device export, then the folder is the export folder with the stream name as a subdirectory If it is a user export, you will use the path of the export folder, with the user/device/stream appended to the end:: myuser.export("./exportdir") DatapointArray().loadExport("./exportdir/username/devicename/streamname")
[ "Adds", "the", "data", "from", "a", "ConnectorDB", "export", ".", "If", "it", "is", "a", "stream", "export", "then", "the", "folder", "is", "the", "location", "of", "the", "export", ".", "If", "it", "is", "a", "device", "export", "then", "the", "folder...
python
test
KnorrFG/pyparadigm
pyparadigm/surface_composition.py
https://github.com/KnorrFG/pyparadigm/blob/69944cdf3ce2f6414ae1aa1d27a0d8c6e5fb3fd3/pyparadigm/surface_composition.py#L483-L511
def compose(target, root=None):
    """Top level function to create a surface.

    :param target: the pygame.Surface to blit on. Or a (width, height)
        tuple in which case a new surface will be created
    :type target: -
    """
    if type(root) == Surface:
        raise ValueError("A Surface may not be used as root, please add "
                         + "it as a single child i.e. compose(...)(Surface(...))")

    @_inner_func_anot
    def inner_compose(*children):
        # Resolve the root context: wrap the children with the provided
        # root, or take the single child directly when no root is given.
        if root:
            context = root(*children)
        else:
            assert len(children) == 1
            context = children[0]

        # Resolve the drawing surface: reuse an existing pygame.Surface,
        # or create a fresh one from a (width, height) tuple.
        if type(target) == pygame.Surface:
            canvas = target
            dims = target.get_size()
        else:
            dims = target
            canvas = pygame.Surface(dims)

        context._draw(canvas, pygame.Rect(0, 0, *dims))
        return canvas

    return inner_compose
[ "def", "compose", "(", "target", ",", "root", "=", "None", ")", ":", "if", "type", "(", "root", ")", "==", "Surface", ":", "raise", "ValueError", "(", "\"A Surface may not be used as root, please add \"", "+", "\"it as a single child i.e. compose(...)(Surface(...))\"", ...
Top level function to create a surface. :param target: the pygame.Surface to blit on. Or a (width, height) tuple in which case a new surface will be created :type target: -
[ "Top", "level", "function", "to", "create", "a", "surface", ".", ":", "param", "target", ":", "the", "pygame", ".", "Surface", "to", "blit", "on", ".", "Or", "a", "(", "width", "height", ")", "tuple", "in", "which", "case", "a", "new", "surface", "wi...
python
train
fossasia/knittingpattern
knittingpattern/convert/AYABPNGBuilder.py
https://github.com/fossasia/knittingpattern/blob/8e608896b0ab82fea1ca9fbfa2b4ee023d8c8027/knittingpattern/convert/AYABPNGBuilder.py#L82-L93
def _set_pixel(self, x, y, color): """set the color of the pixel. :param color: must be a valid color in the form of "#RRGGBB". If you need to convert color, use `_set_pixel_and_convert_color()`. """ if not self.is_in_bounds(x, y): return rgb = self._convert_rrggbb_to_image_color(color) x -= self._min_x y -= self._min_y self._image.putpixel((x, y), rgb)
[ "def", "_set_pixel", "(", "self", ",", "x", ",", "y", ",", "color", ")", ":", "if", "not", "self", ".", "is_in_bounds", "(", "x", ",", "y", ")", ":", "return", "rgb", "=", "self", ".", "_convert_rrggbb_to_image_color", "(", "color", ")", "x", "-=", ...
set the color of the pixel. :param color: must be a valid color in the form of "#RRGGBB". If you need to convert color, use `_set_pixel_and_convert_color()`.
[ "set", "the", "color", "of", "the", "pixel", "." ]
python
valid
profitbricks/profitbricks-sdk-python
profitbricks/client.py
https://github.com/profitbricks/profitbricks-sdk-python/blob/2c804b141688eccb07d6ae56601d5c60a62abebd/profitbricks/client.py#L1998-L2012
def list_group_users(self, group_id, depth=1):
    """
    Retrieves a list of all users that are members of a particular group.

    :param      group_id: The unique ID of the group.
    :type       group_id: ``str``

    :param      depth: The depth of the response data.
    :type       depth: ``int``

    """
    url = '/um/groups/%s/users?depth=%s' % (group_id, str(depth))
    return self._perform_request(url)
[ "def", "list_group_users", "(", "self", ",", "group_id", ",", "depth", "=", "1", ")", ":", "response", "=", "self", ".", "_perform_request", "(", "'/um/groups/%s/users?depth=%s'", "%", "(", "group_id", ",", "str", "(", "depth", ")", ")", ")", "return", "re...
Retrieves a list of all users that are members of a particular group. :param group_id: The unique ID of the group. :type group_id: ``str`` :param depth: The depth of the response data. :type depth: ``int``
[ "Retrieves", "a", "list", "of", "all", "users", "that", "are", "members", "of", "a", "particular", "group", "." ]
python
valid
SpriteLink/NIPAP
nipap-www/nipapwww/controllers/xhr.py
https://github.com/SpriteLink/NIPAP/blob/f96069f11ab952d80b13cab06e0528f2d24b3de9/nipap-www/nipapwww/controllers/xhr.py#L92-L132
def smart_search_vrf(self):
    """ Perform a smart VRF search.

        The "smart" search function tries extract a query from
        a text string. This query is then passed to the search_vrf
        function, which performs the search.
    """

    search_options = {}
    extra_query = None

    # Pass through paging / query-id options from the request, if present.
    if 'query_id' in request.json:
        search_options['query_id'] = request.json['query_id']

    if 'max_result' in request.json:
        search_options['max_result'] = request.json['max_result']

    if 'offset' in request.json:
        search_options['offset'] = request.json['offset']

    # Restrict the search to a single VRF when an id is supplied.
    if 'vrf_id' in request.json:
        extra_query = {
            'val1': 'id',
            'operator': 'equals',
            'val2': request.json['vrf_id']
        }

    try:
        result = VRF.smart_search(request.json['query_string'],
            search_options, extra_query
            )
        # Remove error key in result from backend as it interferes with the
        # error handling of the web interface.
        # TODO: Reevaluate how to deal with different types of errors; soft
        # errors like query string parser errors and hard errors like lost
        # database.
        del result['error']
    # NOTE(review): Python 2 ``except X, e`` syntax -- this module is not
    # Python 3 compatible as written.
    except NipapError, e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(result, cls=NipapJSONEncoder)
[ "def", "smart_search_vrf", "(", "self", ")", ":", "search_options", "=", "{", "}", "extra_query", "=", "None", "if", "'query_id'", "in", "request", ".", "json", ":", "search_options", "[", "'query_id'", "]", "=", "request", ".", "json", "[", "'query_id'", ...
Perform a smart VRF search. The "smart" search function tries extract a query from a text string. This query is then passed to the search_vrf function, which performs the search.
[ "Perform", "a", "smart", "VRF", "search", "." ]
python
train
twisted/mantissa
xmantissa/scrolltable.py
https://github.com/twisted/mantissa/blob/53e5502aba23ce99be78b27f923a276593033fe8/xmantissa/scrolltable.py#L734-L754
def constructRows(self, items):
    """
    Build row objects that are serializable using Athena for sending to the
    client.

    @param items: an iterable of objects compatible with my columns'
    C{extractValue} methods.

    @return: a list of dictionaries, where each dictionary has a string key
    for each column name in my list of columns.
    """
    serialized = []
    for item in items:
        row = {}
        for (colname, col) in self.columns.iteritems():
            row[colname] = col.extractValue(self, item)
        # Attach the item's link under the reserved '__id__' key, when
        # one exists.
        link = self.linkToItem(item)
        if link is not None:
            row[u'__id__'] = link
        serialized.append(row)
    return serialized
[ "def", "constructRows", "(", "self", ",", "items", ")", ":", "rows", "=", "[", "]", "for", "item", "in", "items", ":", "row", "=", "dict", "(", "(", "colname", ",", "col", ".", "extractValue", "(", "self", ",", "item", ")", ")", "for", "(", "coln...
Build row objects that are serializable using Athena for sending to the client. @param items: an iterable of objects compatible with my columns' C{extractValue} methods. @return: a list of dictionaries, where each dictionary has a string key for each column name in my list of columns.
[ "Build", "row", "objects", "that", "are", "serializable", "using", "Athena", "for", "sending", "to", "the", "client", "." ]
python
train
mitsei/dlkit
dlkit/json_/assessment/sessions.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/sessions.py#L3639-L3658
def assign_item_to_bank(self, item_id, bank_id): """Adds an existing ``Item`` to a ``Bank``. arg: item_id (osid.id.Id): the ``Id`` of the ``Item`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: AlreadyExists - ``item_id`` is already assigned to ``bank_id`` raise: NotFound - ``item_id`` or ``bank_id`` not found raise: NullArgument - ``item_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.* """ # Implemented from template for # osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin mgr = self._get_provider_manager('ASSESSMENT', local=True) lookup_session = mgr.get_bank_lookup_session(proxy=self._proxy) lookup_session.get_bank(bank_id) # to raise NotFound self._assign_object_to_catalog(item_id, bank_id)
[ "def", "assign_item_to_bank", "(", "self", ",", "item_id", ",", "bank_id", ")", ":", "# Implemented from template for", "# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin", "mgr", "=", "self", ".", "_get_provider_manager", "(", "'ASSESSMENT'", ",", "local", ...
Adds an existing ``Item`` to a ``Bank``. arg: item_id (osid.id.Id): the ``Id`` of the ``Item`` arg: bank_id (osid.id.Id): the ``Id`` of the ``Bank`` raise: AlreadyExists - ``item_id`` is already assigned to ``bank_id`` raise: NotFound - ``item_id`` or ``bank_id`` not found raise: NullArgument - ``item_id`` or ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred *compliance: mandatory -- This method must be implemented.*
[ "Adds", "an", "existing", "Item", "to", "a", "Bank", "." ]
python
train
EpistasisLab/tpot
tpot/builtins/one_hot_encoder.py
https://github.com/EpistasisLab/tpot/blob/b626271e6b5896a73fb9d7d29bebc7aa9100772e/tpot/builtins/one_hot_encoder.py#L239-L267
def _matrix_adjust(self, X): """Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities. """ data_matrix = X.data if sparse.issparse(X) else X # Shift all values to specially encode for NAN/infinity/OTHER and 0 # Old value New Value # --------- --------- # N (0..int_max) N + 3 # np.NaN 2 # infinity 2 # *other* 1 # # A value of 0 is reserved, as that is specially handled in sparse # matrices. data_matrix += len(SPARSE_ENCODINGS) + 1 data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN'] return X
[ "def", "_matrix_adjust", "(", "self", ",", "X", ")", ":", "data_matrix", "=", "X", ".", "data", "if", "sparse", ".", "issparse", "(", "X", ")", "else", "X", "# Shift all values to specially encode for NAN/infinity/OTHER and 0", "# Old value New Value", "# --...
Adjust all values in X to encode for NaNs and infinities in the data. Parameters ---------- X : array-like, shape=(n_samples, n_feature) Input array of type int. Returns ------- X : array-like, shape=(n_samples, n_feature) Input array without any NaNs or infinities.
[ "Adjust", "all", "values", "in", "X", "to", "encode", "for", "NaNs", "and", "infinities", "in", "the", "data", "." ]
python
train
manns/pyspread
pyspread/src/lib/fileio.py
https://github.com/manns/pyspread/blob/0e2fd44c2e0f06605efc3058c20a43a8c1f9e7e0/pyspread/src/lib/fileio.py#L103-L115
def write(self, *args, **kwargs): """Write that shows progress in statusbar for each <freq> cells""" self.progress_status() # Check abortes state and raise StopIteration if aborted if self.aborted: statustext = _("File saving aborted.") post_command_event(self.main_window, self.main_window.StatusBarMsg, text=statustext) return False return self.parent_cls.write(self, *args, **kwargs)
[ "def", "write", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "self", ".", "progress_status", "(", ")", "# Check abortes state and raise StopIteration if aborted", "if", "self", ".", "aborted", ":", "statustext", "=", "_", "(", "\"File savi...
Write that shows progress in statusbar for each <freq> cells
[ "Write", "that", "shows", "progress", "in", "statusbar", "for", "each", "<freq", ">", "cells" ]
python
train
Azure/azure-cosmos-table-python
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L1044-L1072
def insert_or_replace_entity(self, table_name, entity, timeout=None): ''' Replaces an existing entity or inserts a new entity if it does not exist in the table. Because this operation can insert or update an entity, it is also known as an "upsert" operation. If insert_or_replace_entity is used to replace an entity, any properties from the previous entity will be removed if the new entity does not define them. :param str table_name: The name of the table in which to insert or replace the entity. :param entity: The entity to insert or replace. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str ''' _validate_not_none('table_name', table_name) request = _insert_or_replace_entity(entity, self.require_encryption, self.key_encryption_key, self.encryption_resolver_function) request.host_locations = self._get_host_locations() request.query['timeout'] = _int_to_str(timeout) request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey']) return self._perform_request(request, _extract_etag)
[ "def", "insert_or_replace_entity", "(", "self", ",", "table_name", ",", "entity", ",", "timeout", "=", "None", ")", ":", "_validate_not_none", "(", "'table_name'", ",", "table_name", ")", "request", "=", "_insert_or_replace_entity", "(", "entity", ",", "self", "...
Replaces an existing entity or inserts a new entity if it does not exist in the table. Because this operation can insert or update an entity, it is also known as an "upsert" operation. If insert_or_replace_entity is used to replace an entity, any properties from the previous entity will be removed if the new entity does not define them. :param str table_name: The name of the table in which to insert or replace the entity. :param entity: The entity to insert or replace. Could be a dict or an entity object. Must contain a PartitionKey and a RowKey. :type entity: dict or :class:`~azure.storage.table.models.Entity` :param int timeout: The server timeout, expressed in seconds. :return: The etag of the entity. :rtype: str
[ "Replaces", "an", "existing", "entity", "or", "inserts", "a", "new", "entity", "if", "it", "does", "not", "exist", "in", "the", "table", ".", "Because", "this", "operation", "can", "insert", "or", "update", "an", "entity", "it", "is", "also", "known", "a...
python
train
maas/python-libmaas
maas/client/utils/profiles.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/utils/profiles.py#L247-L283
def open(cls, dbpath=Path("~/.maas.db").expanduser(), migrate_from=Path("~/.maascli.db").expanduser()): """Load a profiles database. Called without arguments this will open (and create) a database in the user's home directory. **Note** that this returns a context manager which will close the database on exit, saving if the exit is clean. :param dbpath: The path to the database file to create and open. :param migrate_from: Path to a database file to migrate from. """ # Ensure we're working with a Path instance. dbpath = Path(dbpath) migrate_from = Path(migrate_from) # See if we ought to do a one-time migration. migrate = migrate_from.is_file() and not dbpath.exists() # Initialise filename with restrictive permissions... dbpath.touch(mode=0o600, exist_ok=True) # Final check to see if it's safe to migrate. migrate = migrate and not migrate_from.samefile(dbpath) # before opening it with sqlite. database = sqlite3.connect(str(dbpath)) try: store = cls(database) if migrate: schema_import(database, migrate_from) yield store else: yield store except: # noqa: E722 raise else: database.commit() finally: database.close()
[ "def", "open", "(", "cls", ",", "dbpath", "=", "Path", "(", "\"~/.maas.db\"", ")", ".", "expanduser", "(", ")", ",", "migrate_from", "=", "Path", "(", "\"~/.maascli.db\"", ")", ".", "expanduser", "(", ")", ")", ":", "# Ensure we're working with a Path instance...
Load a profiles database. Called without arguments this will open (and create) a database in the user's home directory. **Note** that this returns a context manager which will close the database on exit, saving if the exit is clean. :param dbpath: The path to the database file to create and open. :param migrate_from: Path to a database file to migrate from.
[ "Load", "a", "profiles", "database", "." ]
python
train
webkom/django-auth-abakus
abakus/auth.py
https://github.com/webkom/django-auth-abakus/blob/26779ad8e9f520835c2665f8a4d6731ca6f4ca18/abakus/auth.py#L25-L71
def authenticate(self, username, password): """ Should try to login with abakus.no (NERD). """ if getattr(settings, 'ABAKUS_DUMMY_AUTH', False): return self.dummy_authenticate(username, password) response = requests.post(url=path, data={'username': username, 'password': password}) info = response.json() try: user_info = info['user'] except KeyError: raise ApiError(info['status_message']) if not bool(user_info['auth']): return None if getattr(settings, 'ABAKUS_AUTH_REQUIRE_ABAKUS', False) and not user_info['is_abakus']: return None if getattr(settings, 'ABAKUS_AUTH_REQUIRE_ABAKOM', False) and not user_info['is_abakom']: return None if hasattr(settings, 'ABAKUS_GROUP_REQUIRED'): if not self.has_required_group(user_info): return None user = get_user_model().objects.get_or_create(username=username)[0] user.is_active = True self.parse_name(user, user_info['name']) user.save() if 'committees' in user_info: for committee in user_info['committees']: groups = Group.objects.filter(name=committee) if len(groups) == 1: user.groups.add(groups[0]) if hasattr(settings, 'ABAKUS_SUPERUSER_GROUPS'): for superuser_committee in settings.ABAKUS_SUPERUSER_GROUPS: if superuser_committee in user_info['committees']: user.is_superuser = True user.is_staff = True user.save() break return user
[ "def", "authenticate", "(", "self", ",", "username", ",", "password", ")", ":", "if", "getattr", "(", "settings", ",", "'ABAKUS_DUMMY_AUTH'", ",", "False", ")", ":", "return", "self", ".", "dummy_authenticate", "(", "username", ",", "password", ")", "respons...
Should try to login with abakus.no (NERD).
[ "Should", "try", "to", "login", "with", "abakus", ".", "no", "(", "NERD", ")", "." ]
python
train
qntm/greenery
greenery/fsm.py
https://github.com/qntm/greenery/blob/f4dc7fb483825459d26e8fe9f417764469c56ee8/greenery/fsm.py#L649-L684
def derive(self, input): ''' Compute the Brzozowski derivative of this FSM with respect to the input string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative> If any of the symbols are not members of the alphabet, that's a KeyError. If you fall into oblivion, then the derivative is an FSM accepting no strings. ''' try: # Consume the input string. state = self.initial for symbol in input: if not symbol in self.alphabet: if not anything_else in self.alphabet: raise KeyError(symbol) symbol = anything_else # Missing transition = transition to dead state if not (state in self.map and symbol in self.map[state]): raise OblivionError state = self.map[state][symbol] # OK so now we have consumed that string, use the new location as the # starting point. return fsm( alphabet = self.alphabet, states = self.states, initial = state, finals = self.finals, map = self.map, ) except OblivionError: # Fell out of the FSM. The derivative of this FSM is the empty FSM. return null(self.alphabet)
[ "def", "derive", "(", "self", ",", "input", ")", ":", "try", ":", "# Consume the input string.", "state", "=", "self", ".", "initial", "for", "symbol", "in", "input", ":", "if", "not", "symbol", "in", "self", ".", "alphabet", ":", "if", "not", "anything_...
Compute the Brzozowski derivative of this FSM with respect to the input string of symbols. <https://en.wikipedia.org/wiki/Brzozowski_derivative> If any of the symbols are not members of the alphabet, that's a KeyError. If you fall into oblivion, then the derivative is an FSM accepting no strings.
[ "Compute", "the", "Brzozowski", "derivative", "of", "this", "FSM", "with", "respect", "to", "the", "input", "string", "of", "symbols", ".", "<https", ":", "//", "en", ".", "wikipedia", ".", "org", "/", "wiki", "/", "Brzozowski_derivative", ">", "If", "any"...
python
train
secdev/scapy
scapy/packet.py
https://github.com/secdev/scapy/blob/3ffe757c184017dd46464593a8f80f85abc1e79a/scapy/packet.py#L551-L560
def build(self): """ Create the current layer :return: string of the packet with the payload """ p = self.do_build() p += self.build_padding() p = self.build_done(p) return p
[ "def", "build", "(", "self", ")", ":", "p", "=", "self", ".", "do_build", "(", ")", "p", "+=", "self", ".", "build_padding", "(", ")", "p", "=", "self", ".", "build_done", "(", "p", ")", "return", "p" ]
Create the current layer :return: string of the packet with the payload
[ "Create", "the", "current", "layer" ]
python
train
quantmind/pulsar
pulsar/apps/wsgi/wrappers.py
https://github.com/quantmind/pulsar/blob/fee44e871954aa6ca36d00bb5a3739abfdb89b26/pulsar/apps/wsgi/wrappers.py#L172-L179
def cookies(self): """Container of request cookies """ cookies = SimpleCookie() cookie = self.environ.get('HTTP_COOKIE') if cookie: cookies.load(cookie) return cookies
[ "def", "cookies", "(", "self", ")", ":", "cookies", "=", "SimpleCookie", "(", ")", "cookie", "=", "self", ".", "environ", ".", "get", "(", "'HTTP_COOKIE'", ")", "if", "cookie", ":", "cookies", ".", "load", "(", "cookie", ")", "return", "cookies" ]
Container of request cookies
[ "Container", "of", "request", "cookies" ]
python
train
shexSpec/grammar
parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py
https://github.com/shexSpec/grammar/blob/4497cd1f73fa6703bca6e2cb53ba9c120f22e48c/parsers/python/pyshexc/parser_impl/shex_node_expression_parser.py#L121-L129
def _literal_exclusions(self, stem: LiteralStemRange, exclusions: List[ShExDocParser.LiteralExclusionContext]) -> None: """ ShExC: literalExclusion = '-' literal STEM_MARK? ShExJ: exclusions: [STRING|LiteralStem +] literalStem: {stem:STRING} """ for excl in exclusions: excl_literal_v = self.context.literal_to_ObjectLiteral(excl.literal()).value stem.exclusions.append(LiteralStem(excl_literal_v) if excl.STEM_MARK() else excl_literal_v)
[ "def", "_literal_exclusions", "(", "self", ",", "stem", ":", "LiteralStemRange", ",", "exclusions", ":", "List", "[", "ShExDocParser", ".", "LiteralExclusionContext", "]", ")", "->", "None", ":", "for", "excl", "in", "exclusions", ":", "excl_literal_v", "=", "...
ShExC: literalExclusion = '-' literal STEM_MARK? ShExJ: exclusions: [STRING|LiteralStem +] literalStem: {stem:STRING}
[ "ShExC", ":", "literalExclusion", "=", "-", "literal", "STEM_MARK?", "ShExJ", ":", "exclusions", ":", "[", "STRING|LiteralStem", "+", "]", "literalStem", ":", "{", "stem", ":", "STRING", "}" ]
python
train
d0c-s4vage/pfp
pfp/bitwrap.py
https://github.com/d0c-s4vage/pfp/blob/32f2d34fdec1c70019fa83c7006d5e3be0f92fcd/pfp/bitwrap.py#L207-L217
def size(self): """Return the size of the stream, or -1 if it cannot be determined. """ pos = self._stream.tell() # seek to the end of the stream self._stream.seek(0,2) size = self._stream.tell() self._stream.seek(pos, 0) return size
[ "def", "size", "(", "self", ")", ":", "pos", "=", "self", ".", "_stream", ".", "tell", "(", ")", "# seek to the end of the stream", "self", ".", "_stream", ".", "seek", "(", "0", ",", "2", ")", "size", "=", "self", ".", "_stream", ".", "tell", "(", ...
Return the size of the stream, or -1 if it cannot be determined.
[ "Return", "the", "size", "of", "the", "stream", "or", "-", "1", "if", "it", "cannot", "be", "determined", "." ]
python
train
eykd/paved
paved/s3.py
https://github.com/eykd/paved/blob/f04f8a4248c571f3d5ce882b325884a3e5d80203/paved/s3.py#L44-L94
def upload_s3(file_path, bucket_name, file_key, force=False, acl='private'): """Upload a local file to S3. """ file_path = path(file_path) bucket = open_s3(bucket_name) if file_path.isdir(): # Upload the contents of the dir path. paths = file_path.listdir() paths_keys = list(zip(paths, ['%s/%s' % (file_key, p.name) for p in paths])) else: # Upload just the given file path. paths_keys = [(file_path, file_key)] for p, k in paths_keys: headers = {} s3_key = bucket.get_key(k) if not s3_key: from boto.s3.key import Key s3_key = Key(bucket, k) content_type = mimetypes.guess_type(p)[0] if content_type: headers['Content-Type'] = content_type file_size = p.stat().st_size file_data = p.bytes() file_md5, file_md5_64 = s3_key.get_md5_from_hexdigest(hashlib.md5(file_data).hexdigest()) # Check the hash. if s3_key.etag: s3_md5 = s3_key.etag.replace('"', '') if s3_md5 == file_md5: info('Hash is the same. Skipping %s' % file_path) continue elif not force: # Check if file on S3 is older than local file. s3_datetime = datetime.datetime(*time.strptime( s3_key.last_modified, '%a, %d %b %Y %H:%M:%S %Z')[0:6]) local_datetime = datetime.datetime.utcfromtimestamp(p.stat().st_mtime) if local_datetime < s3_datetime: info("File %s hasn't been modified since last " \ "being uploaded" % (file_key)) continue # File is newer, let's process and upload info("Uploading %s..." % (file_key)) try: s3_key.set_contents_from_string(file_data, headers, policy=acl, replace=True, md5=(file_md5, file_md5_64)) except Exception as e: error("Failed: %s" % e) raise
[ "def", "upload_s3", "(", "file_path", ",", "bucket_name", ",", "file_key", ",", "force", "=", "False", ",", "acl", "=", "'private'", ")", ":", "file_path", "=", "path", "(", "file_path", ")", "bucket", "=", "open_s3", "(", "bucket_name", ")", "if", "file...
Upload a local file to S3.
[ "Upload", "a", "local", "file", "to", "S3", "." ]
python
valid
django-haystack/pysolr
pysolr.py
https://github.com/django-haystack/pysolr/blob/ee28b39324fa21a99842d297e313c1759d8adbd2/pysolr.py#L1161-L1167
def reload(self, core): # NOQA: A003 """http://wiki.apache.org/solr/CoreAdmin#head-3f125034c6a64611779442539812067b8b430930""" params = { 'action': 'RELOAD', 'core': core, } return self._get_url(self.url, params=params)
[ "def", "reload", "(", "self", ",", "core", ")", ":", "# NOQA: A003", "params", "=", "{", "'action'", ":", "'RELOAD'", ",", "'core'", ":", "core", ",", "}", "return", "self", ".", "_get_url", "(", "self", ".", "url", ",", "params", "=", "params", ")" ...
http://wiki.apache.org/solr/CoreAdmin#head-3f125034c6a64611779442539812067b8b430930
[ "http", ":", "//", "wiki", ".", "apache", ".", "org", "/", "solr", "/", "CoreAdmin#head", "-", "3f125034c6a64611779442539812067b8b430930" ]
python
train
nchopin/particles
particles/resampling.py
https://github.com/nchopin/particles/blob/3faa97a1073db45c5889eef3e015dd76ef350b52/particles/resampling.py#L259-L274
def log_sum_exp_ab(a, b): """log_sum_exp for two scalars. Parameters ---------- a, b: float Returns ------- c: float c = log(e^a + e^b) """ if a > b: return a + np.log(1. + np.exp(b - a)) else: return b + np.log(1. + np.exp(a - b))
[ "def", "log_sum_exp_ab", "(", "a", ",", "b", ")", ":", "if", "a", ">", "b", ":", "return", "a", "+", "np", ".", "log", "(", "1.", "+", "np", ".", "exp", "(", "b", "-", "a", ")", ")", "else", ":", "return", "b", "+", "np", ".", "log", "(",...
log_sum_exp for two scalars. Parameters ---------- a, b: float Returns ------- c: float c = log(e^a + e^b)
[ "log_sum_exp", "for", "two", "scalars", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_mac_address_table.py#L313-L327
def get_mac_address_table_output_mac_address_table_vlanid(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_mac_address_table = ET.Element("get_mac_address_table") config = get_mac_address_table output = ET.SubElement(get_mac_address_table, "output") mac_address_table = ET.SubElement(output, "mac-address-table") mac_address_key = ET.SubElement(mac_address_table, "mac-address") mac_address_key.text = kwargs.pop('mac_address') vlanid = ET.SubElement(mac_address_table, "vlanid") vlanid.text = kwargs.pop('vlanid') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "get_mac_address_table_output_mac_address_table_vlanid", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "get_mac_address_table", "=", "ET", ".", "Element", "(", "\"get_mac_address_table\"", ")", ...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
kaste/mockito-python
mockito/mockito.py
https://github.com/kaste/mockito-python/blob/d6b22b003f56ee5b156dbd9d8ba209faf35b6713/mockito/mockito.py#L284-L317
def expect(obj, strict=None, times=None, atleast=None, atmost=None, between=None): """Stub a function call, and set up an expected call count. Usage:: # Given `dog` is an instance of a `Dog` expect(dog, times=1).bark('Wuff').thenReturn('Miau') dog.bark('Wuff') dog.bark('Wuff') # will throw at call time: too many invocations # maybe if you need to ensure that `dog.bark()` was called at all verifyNoUnwantedInteractions() .. note:: You must :func:`unstub` after stubbing, or use `with` statement. See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions` """ if strict is None: strict = True theMock = _get_mock(obj, strict=strict) verification_fn = _get_wanted_verification( times=times, atleast=atleast, atmost=atmost, between=between) class Expect(object): def __getattr__(self, method_name): return invocation.StubbedInvocation( theMock, method_name, verification=verification_fn, strict=strict) return Expect()
[ "def", "expect", "(", "obj", ",", "strict", "=", "None", ",", "times", "=", "None", ",", "atleast", "=", "None", ",", "atmost", "=", "None", ",", "between", "=", "None", ")", ":", "if", "strict", "is", "None", ":", "strict", "=", "True", "theMock",...
Stub a function call, and set up an expected call count. Usage:: # Given `dog` is an instance of a `Dog` expect(dog, times=1).bark('Wuff').thenReturn('Miau') dog.bark('Wuff') dog.bark('Wuff') # will throw at call time: too many invocations # maybe if you need to ensure that `dog.bark()` was called at all verifyNoUnwantedInteractions() .. note:: You must :func:`unstub` after stubbing, or use `with` statement. See :func:`when`, :func:`when2`, :func:`verifyNoUnwantedInteractions`
[ "Stub", "a", "function", "call", "and", "set", "up", "an", "expected", "call", "count", "." ]
python
train
wtsi-hgi/gitlab-build-variables
gitlabbuildvariables/reader.py
https://github.com/wtsi-hgi/gitlab-build-variables/blob/ed1afe50bc41fa20ffb29cacba5ff6dbc2446808/gitlabbuildvariables/reader.py#L12-L27
def read_variables(config_location: str) -> Dict[str, str]: """ Reads variables out of a config file. Variables can be in a ini file, a shell file used to source the variables (i.e. one that has just got "export *" like statements in it) or in JSON. :param config_location: the location of the config file :return: dictionary where the variable names are key and their values are the values """ with open(config_location, "r") as config_file: config_lines = config_file.readlines() try: return json.loads("".join(config_lines), parse_int=lambda num_str: str(num_str), parse_float=lambda float_str: str(float_str)) except JSONDecodeError: pass config_lines = _shell_to_ini(config_lines) return _read_ini_config("\n".join(config_lines))
[ "def", "read_variables", "(", "config_location", ":", "str", ")", "->", "Dict", "[", "str", ",", "str", "]", ":", "with", "open", "(", "config_location", ",", "\"r\"", ")", "as", "config_file", ":", "config_lines", "=", "config_file", ".", "readlines", "("...
Reads variables out of a config file. Variables can be in a ini file, a shell file used to source the variables (i.e. one that has just got "export *" like statements in it) or in JSON. :param config_location: the location of the config file :return: dictionary where the variable names are key and their values are the values
[ "Reads", "variables", "out", "of", "a", "config", "file", ".", "Variables", "can", "be", "in", "a", "ini", "file", "a", "shell", "file", "used", "to", "source", "the", "variables", "(", "i", ".", "e", ".", "one", "that", "has", "just", "got", "export...
python
train
UB-UNIBAS/simple-elastic
simple_elastic/index.py
https://github.com/UB-UNIBAS/simple-elastic/blob/54f2fdd3405a7eafbf8873f337da263b8d47532a/simple_elastic/index.py#L189-L197
def update(self, doc: dict, doc_id: str): """Partial update to a single document. Uses the Update API with the specified partial document. """ body = { 'doc': doc } self.instance.update(self.index, self.doc_type, doc_id, body=body)
[ "def", "update", "(", "self", ",", "doc", ":", "dict", ",", "doc_id", ":", "str", ")", ":", "body", "=", "{", "'doc'", ":", "doc", "}", "self", ".", "instance", ".", "update", "(", "self", ".", "index", ",", "self", ".", "doc_type", ",", "doc_id"...
Partial update to a single document. Uses the Update API with the specified partial document.
[ "Partial", "update", "to", "a", "single", "document", "." ]
python
train
smira/py-numa
numa.py
https://github.com/smira/py-numa/blob/eb38979c61028eb9422a4ad1eda0387cd93ea390/numa.py#L230-L243
def set_interleave_mask(nodemask): """ Sets the memory interleave mask for the current thread to C{nodemask}. @param nodemask: node mask @type nodemask: C{set} """ mask = set_to_numa_nodemask(nodemask) tmp = bitmask_t() tmp.maskp = cast(byref(mask), POINTER(c_ulong)) tmp.size = sizeof(nodemask_t) * 8 libnuma.numa_set_interleave_mask(byref(tmp))
[ "def", "set_interleave_mask", "(", "nodemask", ")", ":", "mask", "=", "set_to_numa_nodemask", "(", "nodemask", ")", "tmp", "=", "bitmask_t", "(", ")", "tmp", ".", "maskp", "=", "cast", "(", "byref", "(", "mask", ")", ",", "POINTER", "(", "c_ulong", ")", ...
Sets the memory interleave mask for the current thread to C{nodemask}. @param nodemask: node mask @type nodemask: C{set}
[ "Sets", "the", "memory", "interleave", "mask", "for", "the", "current", "thread", "to", "C", "{", "nodemask", "}", "." ]
python
train
datamachine/twx.botapi
twx/botapi/botapi.py
https://github.com/datamachine/twx.botapi/blob/c85184da738169e8f9d6d8e62970540f427c486e/twx/botapi/botapi.py#L4266-L4268
def set_chat_photo(self, *args, **kwargs): """See :func:`set_chat_photo`""" return set_chat_photo(*args, **self._merge_overrides(**kwargs)).run()
[ "def", "set_chat_photo", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "set_chat_photo", "(", "*", "args", ",", "*", "*", "self", ".", "_merge_overrides", "(", "*", "*", "kwargs", ")", ")", ".", "run", "(", ")" ]
See :func:`set_chat_photo`
[ "See", ":", "func", ":", "set_chat_photo" ]
python
train
IrvKalb/pygwidgets
pygwidgets/pygwidgets.py
https://github.com/IrvKalb/pygwidgets/blob/a830d8885d4d209e471cb53816277d30db56273c/pygwidgets/pygwidgets.py#L1301-L1305
def disableGroup(self): """Disables all radio buttons in the group""" radioButtonListInGroup = PygWidgetsRadioButton.__PygWidgets__Radio__Buttons__Groups__Dicts__[self.group] for radioButton in radioButtonListInGroup: radioButton.disable()
[ "def", "disableGroup", "(", "self", ")", ":", "radioButtonListInGroup", "=", "PygWidgetsRadioButton", ".", "__PygWidgets__Radio__Buttons__Groups__Dicts__", "[", "self", ".", "group", "]", "for", "radioButton", "in", "radioButtonListInGroup", ":", "radioButton", ".", "di...
Disables all radio buttons in the group
[ "Disables", "all", "radio", "buttons", "in", "the", "group" ]
python
train
ghcollin/multitables
multitables.py
https://github.com/ghcollin/multitables/blob/9654a45800289a20e66d2b0e0666149f0d370f93/multitables.py#L107-L115
def wait(self, index, next_index=None): """ Block until it is the turn indicated by index. :param index: :param next_index: Set the index to this value after finishing. Releases the process waiting on next_index. Defaults to incrementing index by 1. :return: """ return OrderedBarrier.Guard(self, index, index+1 if next_index is None else next_index)
[ "def", "wait", "(", "self", ",", "index", ",", "next_index", "=", "None", ")", ":", "return", "OrderedBarrier", ".", "Guard", "(", "self", ",", "index", ",", "index", "+", "1", "if", "next_index", "is", "None", "else", "next_index", ")" ]
Block until it is the turn indicated by index. :param index: :param next_index: Set the index to this value after finishing. Releases the process waiting on next_index. Defaults to incrementing index by 1. :return:
[ "Block", "until", "it", "is", "the", "turn", "indicated", "by", "index", ".", ":", "param", "index", ":", ":", "param", "next_index", ":", "Set", "the", "index", "to", "this", "value", "after", "finishing", ".", "Releases", "the", "process", "waiting", "...
python
test
Ex-Mente/auxi.0
auxi/core/reporting.py
https://github.com/Ex-Mente/auxi.0/blob/2dcdae74154f136f8ca58289fe5b20772f215046/auxi/core/reporting.py#L53-L81
def render(self, format=ReportFormat.printout): """ Render the report in the specified format :param format: The format. The default format is to print the report to the console. :returns: If the format was set to 'string' then a string representation of the report is returned. """ table = self._generate_table_() if format == ReportFormat.printout: print(tabulate(table, headers="firstrow", tablefmt="simple")) elif format == ReportFormat.latex: self._render_latex_(table) elif format == ReportFormat.txt: self._render_txt_(table) elif format == ReportFormat.csv: self._render_csv_(table) elif format == ReportFormat.string: return str(tabulate(table, headers="firstrow", tablefmt="simple")) elif format == ReportFormat.matplotlib: self._render_matplotlib_() elif format == ReportFormat.png: if self.output_path is None: self._render_matplotlib_() else: self._render_matplotlib_(True)
[ "def", "render", "(", "self", ",", "format", "=", "ReportFormat", ".", "printout", ")", ":", "table", "=", "self", ".", "_generate_table_", "(", ")", "if", "format", "==", "ReportFormat", ".", "printout", ":", "print", "(", "tabulate", "(", "table", ",",...
Render the report in the specified format :param format: The format. The default format is to print the report to the console. :returns: If the format was set to 'string' then a string representation of the report is returned.
[ "Render", "the", "report", "in", "the", "specified", "format" ]
python
valid
six8/polydatum
src/polydatum/context.py
https://github.com/six8/polydatum/blob/c98a498f8e7972218903ec027f6de78089726c1d/src/polydatum/context.py#L239-L275
def _exit(self, obj, type, value, traceback): """ Teardown a Resource or Middleware. """ if type is None: # No in-context exception occurred try: obj.next() except StopIteration: # Resource closed as expected return else: raise RuntimeError('{} yielded more than once.'.format(obj)) else: # In-context exception occurred try: obj.throw(type, value, traceback) raise RuntimeError('{} did not close after throw()'.format(obj)) except StopIteration as exc: # Suppress the exception *unless* it's the same exception that # was passed to throw(). This prevents a StopIteration # raised inside the "with" statement from being suppressed return exc is not value except: # only re-raise if it's *not* the exception that was # passed to throw(), because __exit__() must not raise # an exception unless __exit__() itself failed. But # resource.throw() will raise the exception to signal propagation, # so this fixes the impedance mismatch between the throw() protocol # and the __exit__() protocol. # # Middleware or Resources that throw exceptions before yielding # will just rethrow the same exception here which is expected. They # won't have a chance to do anything about the exception though which # seems OK since they never got to the point of being ready anyway. if sys.exc_info()[1] is not value: raise
[ "def", "_exit", "(", "self", ",", "obj", ",", "type", ",", "value", ",", "traceback", ")", ":", "if", "type", "is", "None", ":", "# No in-context exception occurred", "try", ":", "obj", ".", "next", "(", ")", "except", "StopIteration", ":", "# Resource clo...
Teardown a Resource or Middleware.
[ "Teardown", "a", "Resource", "or", "Middleware", "." ]
python
test
akissa/spamc
setup.py
https://github.com/akissa/spamc/blob/da50732e276f7ed3d67cb75c31cb017d6a62f066/setup.py#L38-L56
def get_readme(): """Generate long description""" pandoc = None for path in os.environ["PATH"].split(os.pathsep): path = path.strip('"') pandoc = os.path.join(path, 'pandoc') if os.path.isfile(pandoc) and os.access(pandoc, os.X_OK): break else: pandoc = None try: if pandoc: cmd = [pandoc, '-t', 'rst', 'README.md'] long_description = os.popen(' '.join(cmd)).read() else: raise ValueError except BaseException: long_description = open("README.md").read() return long_description
[ "def", "get_readme", "(", ")", ":", "pandoc", "=", "None", "for", "path", "in", "os", ".", "environ", "[", "\"PATH\"", "]", ".", "split", "(", "os", ".", "pathsep", ")", ":", "path", "=", "path", ".", "strip", "(", "'\"'", ")", "pandoc", "=", "os...
Generate long description
[ "Generate", "long", "description" ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/brocade_common_def.py#L1151-L1162
def router_fabric_virtual_gateway_address_family_ipv4_accept_unicast_arp_request(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") router = ET.SubElement(config, "router", xmlns="urn:brocade.com:mgmt:brocade-common-def") fabric_virtual_gateway = ET.SubElement(router, "fabric-virtual-gateway", xmlns="urn:brocade.com:mgmt:brocade-anycast-gateway") address_family = ET.SubElement(fabric_virtual_gateway, "address-family") ipv4 = ET.SubElement(address_family, "ipv4") accept_unicast_arp_request = ET.SubElement(ipv4, "accept-unicast-arp-request") callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "router_fabric_virtual_gateway_address_family_ipv4_accept_unicast_arp_request", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "router", "=", "ET", ".", "SubElement", "(", "config", ",", "\"route...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
paramiko/paramiko
paramiko/ssh_gss.py
https://github.com/paramiko/paramiko/blob/cf7d49d66f3b1fbc8b0853518a54050182b3b5eb/paramiko/ssh_gss.py#L483-L508
def ssh_get_mic(self, session_id, gss_kex=False): """ Create the MIC token for a SSH2 message. :param str session_id: The SSH session ID :param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not :return: gssapi-with-mic: Returns the MIC token from SSPI for the message we created with ``_ssh_build_mic``. gssapi-keyex: Returns the MIC token from SSPI with the SSH session ID as message. """ self._session_id = session_id if not gss_kex: mic_field = self._ssh_build_mic( self._session_id, self._username, self._service, self._auth_method, ) mic_token = self._gss_ctxt.sign(mic_field) else: # for key exchange with gssapi-keyex mic_token = self._gss_srv_ctxt.sign(self._session_id) return mic_token
[ "def", "ssh_get_mic", "(", "self", ",", "session_id", ",", "gss_kex", "=", "False", ")", ":", "self", ".", "_session_id", "=", "session_id", "if", "not", "gss_kex", ":", "mic_field", "=", "self", ".", "_ssh_build_mic", "(", "self", ".", "_session_id", ",",...
Create the MIC token for a SSH2 message. :param str session_id: The SSH session ID :param bool gss_kex: Generate the MIC for Key Exchange with SSPI or not :return: gssapi-with-mic: Returns the MIC token from SSPI for the message we created with ``_ssh_build_mic``. gssapi-keyex: Returns the MIC token from SSPI with the SSH session ID as message.
[ "Create", "the", "MIC", "token", "for", "a", "SSH2", "message", "." ]
python
train
EUDAT-B2SAFE/B2HANDLE
b2handle/utilhandle.py
https://github.com/EUDAT-B2SAFE/B2HANDLE/blob/a6d216d459644e01fbdfd5b318a535950bc5cdbb/b2handle/utilhandle.py#L120-L154
def make_request_log_message(**args): ''' Creates a string containing all relevant information about a request made to the Handle System, for logging purposes. :handle: The handle that the request is about. :url: The url the request is sent to. :headers: The headers sent along with the request. :verify: Boolean parameter passed to the requests module (https verification). :resp: The request's response. :op: The library operation during which the request was sent. :payload: Optional. The payload sent with the request. :return: A formatted string. ''' mandatory_args = ['op', 'handle', 'url', 'headers', 'verify', 'resp'] optional_args = ['payload'] util.check_presence_of_mandatory_args(args, mandatory_args) util.add_missing_optional_args_with_value_none(args, optional_args) space = '\n ' message = '' message += '\n'+args['op']+' '+args['handle'] message += space+'URL: '+args['url'] message += space+'HEADERS: '+str(args['headers']) message += space+'VERIFY: '+str(args['verify']) if 'payload' in args.keys(): message += space+'PAYLOAD:'+space+str(args['payload']) message += space+'RESPONSECODE: '+str(args['resp'].status_code) message += space+'RESPONSE:'+space+str(args['resp'].content) return message
[ "def", "make_request_log_message", "(", "*", "*", "args", ")", ":", "mandatory_args", "=", "[", "'op'", ",", "'handle'", ",", "'url'", ",", "'headers'", ",", "'verify'", ",", "'resp'", "]", "optional_args", "=", "[", "'payload'", "]", "util", ".", "check_p...
Creates a string containing all relevant information about a request made to the Handle System, for logging purposes. :handle: The handle that the request is about. :url: The url the request is sent to. :headers: The headers sent along with the request. :verify: Boolean parameter passed to the requests module (https verification). :resp: The request's response. :op: The library operation during which the request was sent. :payload: Optional. The payload sent with the request. :return: A formatted string.
[ "Creates", "a", "string", "containing", "all", "relevant", "information", "about", "a", "request", "made", "to", "the", "Handle", "System", "for", "logging", "purposes", "." ]
python
train
bhearsum/chunkify
chunkify/__init__.py
https://github.com/bhearsum/chunkify/blob/f3a693b17c80626852523955bf3c01b4fd93439b/chunkify/__init__.py#L8-L33
def split_evenly(n, chunks): """Split an integer into evenly distributed list >>> split_evenly(7, 3) [3, 2, 2] >>> split_evenly(12, 3) [4, 4, 4] >>> split_evenly(35, 10) [4, 4, 4, 4, 4, 3, 3, 3, 3, 3] >>> split_evenly(1, 2) Traceback (most recent call last): ... ChunkingError: Number of chunks is greater than number """ if n < chunks: raise ChunkingError("Number of chunks is greater than number") if n % chunks == 0: # Either we can evenly split or only 1 chunk left return [n / chunks] * chunks # otherwise the current chunk should be a bit larger max_size = n / chunks + 1 return [max_size] + split_evenly(n - max_size, chunks - 1)
[ "def", "split_evenly", "(", "n", ",", "chunks", ")", ":", "if", "n", "<", "chunks", ":", "raise", "ChunkingError", "(", "\"Number of chunks is greater than number\"", ")", "if", "n", "%", "chunks", "==", "0", ":", "# Either we can evenly split or only 1 chunk left",...
Split an integer into evenly distributed list >>> split_evenly(7, 3) [3, 2, 2] >>> split_evenly(12, 3) [4, 4, 4] >>> split_evenly(35, 10) [4, 4, 4, 4, 4, 3, 3, 3, 3, 3] >>> split_evenly(1, 2) Traceback (most recent call last): ... ChunkingError: Number of chunks is greater than number
[ "Split", "an", "integer", "into", "evenly", "distributed", "list" ]
python
train
psd-tools/psd-tools
src/psd_tools/api/layers.py
https://github.com/psd-tools/psd-tools/blob/4952b57bcf1cf2c1f16fd9d6d51d4fa0b53bce4e/src/psd_tools/api/layers.py#L342-L351
def compose(self, *args, **kwargs): """ Compose layer and masks (mask, vector mask, and clipping layers). :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel. """ from psd_tools.api.composer import compose_layer if self.bbox == (0, 0, 0, 0): return None return compose_layer(self, *args, **kwargs)
[ "def", "compose", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "psd_tools", ".", "api", ".", "composer", "import", "compose_layer", "if", "self", ".", "bbox", "==", "(", "0", ",", "0", ",", "0", ",", "0", ")", ":", ...
Compose layer and masks (mask, vector mask, and clipping layers). :return: :py:class:`PIL.Image`, or `None` if the layer has no pixel.
[ "Compose", "layer", "and", "masks", "(", "mask", "vector", "mask", "and", "clipping", "layers", ")", "." ]
python
train
LionelAuroux/pyrser
pyrser/type_system/inference.py
https://github.com/LionelAuroux/pyrser/blob/f153a97ef2b6bf915a1ed468c0252a9a59b754d5/pyrser/type_system/inference.py#L228-L246
def infer_id(self, ident, diagnostic=None): """ Infer type from an ID! - check if ID is declarated in the scope - if no ID is polymorphic type """ # check if ID is declared #defined = self.type_node.get_by_symbol_name(ident) defined = self.infer_node.scope_node.get_by_symbol_name(ident) if len(defined) > 0: # set from matchings declarations #self.type_node.update(defined) self.infer_node.scope_node.update(defined) else: diagnostic.notify( Severity.ERROR, "%s never declared" % self.value, self.info )
[ "def", "infer_id", "(", "self", ",", "ident", ",", "diagnostic", "=", "None", ")", ":", "# check if ID is declared", "#defined = self.type_node.get_by_symbol_name(ident)", "defined", "=", "self", ".", "infer_node", ".", "scope_node", ".", "get_by_symbol_name", "(", "i...
Infer type from an ID! - check if ID is declarated in the scope - if no ID is polymorphic type
[ "Infer", "type", "from", "an", "ID!", "-", "check", "if", "ID", "is", "declarated", "in", "the", "scope", "-", "if", "no", "ID", "is", "polymorphic", "type" ]
python
test
apple/turicreate
deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py
https://github.com/apple/turicreate/blob/74514c3f99e25b46f22c6e02977fe3da69221c2e/deps/src/boost_1_68_0/tools/build/src/build/virtual_target.py#L248-L261
def add_suffix (self, specified_name, file_type, prop_set): """ Appends the suffix appropriate to 'type/property_set' combination to the specified name and returns the result. """ assert isinstance(specified_name, basestring) assert isinstance(file_type, basestring) assert isinstance(prop_set, property_set.PropertySet) suffix = b2.build.type.generated_target_suffix (file_type, prop_set) if suffix: return specified_name + '.' + suffix else: return specified_name
[ "def", "add_suffix", "(", "self", ",", "specified_name", ",", "file_type", ",", "prop_set", ")", ":", "assert", "isinstance", "(", "specified_name", ",", "basestring", ")", "assert", "isinstance", "(", "file_type", ",", "basestring", ")", "assert", "isinstance",...
Appends the suffix appropriate to 'type/property_set' combination to the specified name and returns the result.
[ "Appends", "the", "suffix", "appropriate", "to", "type", "/", "property_set", "combination", "to", "the", "specified", "name", "and", "returns", "the", "result", "." ]
python
train
datastore/datastore
datastore/core/basic.py
https://github.com/datastore/datastore/blob/7ccf0cd4748001d3dbf5e6dda369b0f63e0269d3/datastore/core/basic.py#L1138-L1154
def shard_query_generator(self, query): '''A generator that queries each shard in sequence.''' shard_query = query.copy() for shard in self._stores: # yield all items matching within this shard cursor = shard.query(shard_query) for item in cursor: yield item # update query with results of first query shard_query.offset = max(shard_query.offset - cursor.skipped, 0) if shard_query.limit: shard_query.limit = max(shard_query.limit - cursor.returned, 0) if shard_query.limit <= 0: break
[ "def", "shard_query_generator", "(", "self", ",", "query", ")", ":", "shard_query", "=", "query", ".", "copy", "(", ")", "for", "shard", "in", "self", ".", "_stores", ":", "# yield all items matching within this shard", "cursor", "=", "shard", ".", "query", "(...
A generator that queries each shard in sequence.
[ "A", "generator", "that", "queries", "each", "shard", "in", "sequence", "." ]
python
train
ContextLab/quail
quail/load.py
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/load.py#L87-L121
def load_egg(filepath, update=True): """ Loads pickled egg Parameters ---------- filepath : str Location of pickled egg update : bool If true, updates egg to latest format Returns ---------- egg : Egg data object A loaded unpickled egg """ try: egg = Egg(**dd.io.load(filepath)) except: # if error, try loading old format with open(filepath, 'rb') as f: egg = pickle.load(f) if update: if egg.meta: old_meta = egg.meta egg.crack() egg.meta = old_meta return egg else: return egg.crack() else: return egg
[ "def", "load_egg", "(", "filepath", ",", "update", "=", "True", ")", ":", "try", ":", "egg", "=", "Egg", "(", "*", "*", "dd", ".", "io", ".", "load", "(", "filepath", ")", ")", "except", ":", "# if error, try loading old format", "with", "open", "(", ...
Loads pickled egg Parameters ---------- filepath : str Location of pickled egg update : bool If true, updates egg to latest format Returns ---------- egg : Egg data object A loaded unpickled egg
[ "Loads", "pickled", "egg" ]
python
train
Damgaard/PyImgur
pyimgur/__init__.py
https://github.com/Damgaard/PyImgur/blob/606f17078d24158632f807430f8d0b9b3cd8b312/pyimgur/__init__.py#L432-L437
def get_replies(self): """Get the replies to this comment.""" url = self._imgur._base_url + "/3/comment/{0}/replies".format(self.id) json = self._imgur._send_request(url) child_comments = json['children'] return [Comment(com, self._imgur) for com in child_comments]
[ "def", "get_replies", "(", "self", ")", ":", "url", "=", "self", ".", "_imgur", ".", "_base_url", "+", "\"/3/comment/{0}/replies\"", ".", "format", "(", "self", ".", "id", ")", "json", "=", "self", ".", "_imgur", ".", "_send_request", "(", "url", ")", ...
Get the replies to this comment.
[ "Get", "the", "replies", "to", "this", "comment", "." ]
python
train
square/pylink
setup.py
https://github.com/square/pylink/blob/81dda0a191d923a8b2627c52cb778aba24d279d7/setup.py#L111-L121
def finalize_options(self): """Finalizes the command's options. Args: self (CoverageCommand): the ``CoverageCommand`` instance Returns: ``None`` """ self.cwd = os.path.abspath(os.path.dirname(__file__)) self.test_dir = os.path.join(self.cwd, 'tests')
[ "def", "finalize_options", "(", "self", ")", ":", "self", ".", "cwd", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "dirname", "(", "__file__", ")", ")", "self", ".", "test_dir", "=", "os", ".", "path", ".", "join", "(", "s...
Finalizes the command's options. Args: self (CoverageCommand): the ``CoverageCommand`` instance Returns: ``None``
[ "Finalizes", "the", "command", "s", "options", "." ]
python
train
chrismattmann/nutch-python
nutch/crawl.py
https://github.com/chrismattmann/nutch-python/blob/07ae182e283b2f74ef062ddfa20a690a59ab6f5a/nutch/crawl.py#L38-L51
def crawl_cmd(self, seed_list, n): ''' Runs the crawl job for n rounds :param seed_list: lines of seed URLs :param n: number of rounds :return: number of successful rounds ''' print("Num Rounds "+str(n)) cc = self.proxy.Crawl(seed=seed_list, rounds=n) rounds = cc.waitAll() print("Completed %d rounds" % len(rounds)) return len(rounds)
[ "def", "crawl_cmd", "(", "self", ",", "seed_list", ",", "n", ")", ":", "print", "(", "\"Num Rounds \"", "+", "str", "(", "n", ")", ")", "cc", "=", "self", ".", "proxy", ".", "Crawl", "(", "seed", "=", "seed_list", ",", "rounds", "=", "n", ")", "r...
Runs the crawl job for n rounds :param seed_list: lines of seed URLs :param n: number of rounds :return: number of successful rounds
[ "Runs", "the", "crawl", "job", "for", "n", "rounds", ":", "param", "seed_list", ":", "lines", "of", "seed", "URLs", ":", "param", "n", ":", "number", "of", "rounds", ":", "return", ":", "number", "of", "successful", "rounds" ]
python
train
bitshares/uptick
uptick/account.py
https://github.com/bitshares/uptick/blob/66c102200fdbf96cef4fd55cc69d00e690f62001/uptick/account.py#L269-L273
def blacklist(ctx, blacklist_account, account): """ Add an account to a blacklist """ account = Account(account, blockchain_instance=ctx.blockchain) print_tx(account.blacklist(blacklist_account))
[ "def", "blacklist", "(", "ctx", ",", "blacklist_account", ",", "account", ")", ":", "account", "=", "Account", "(", "account", ",", "blockchain_instance", "=", "ctx", ".", "blockchain", ")", "print_tx", "(", "account", ".", "blacklist", "(", "blacklist_account...
Add an account to a blacklist
[ "Add", "an", "account", "to", "a", "blacklist" ]
python
train
NarrativeScience/lsi
src/lsi/utils/hosts.py
https://github.com/NarrativeScience/lsi/blob/7d901b03fdb1a34ef795e5412bfe9685d948e32d/src/lsi/utils/hosts.py#L462-L472
def get_region(): """Use the environment to get the current region""" global _REGION if _REGION is None: region_name = os.getenv("AWS_DEFAULT_REGION") or "us-east-1" region_dict = {r.name: r for r in boto.regioninfo.get_regions("ec2")} if region_name not in region_dict: raise ValueError("No such EC2 region: {}. Check AWS_DEFAULT_REGION " "environment variable".format(region_name)) _REGION = region_dict[region_name] return _REGION
[ "def", "get_region", "(", ")", ":", "global", "_REGION", "if", "_REGION", "is", "None", ":", "region_name", "=", "os", ".", "getenv", "(", "\"AWS_DEFAULT_REGION\"", ")", "or", "\"us-east-1\"", "region_dict", "=", "{", "r", ".", "name", ":", "r", "for", "...
Use the environment to get the current region
[ "Use", "the", "environment", "to", "get", "the", "current", "region" ]
python
test
antocuni/pdb
pdb.py
https://github.com/antocuni/pdb/blob/a88be00d31f1ff38e26711a1d99589d830524c9e/pdb.py#L1218-L1225
def is_skipped_module(self, module_name): """Backport for https://bugs.python.org/issue36130. Fixed in Python 3.8+. """ if module_name is None: return False return super(Pdb, self).is_skipped_module(module_name)
[ "def", "is_skipped_module", "(", "self", ",", "module_name", ")", ":", "if", "module_name", "is", "None", ":", "return", "False", "return", "super", "(", "Pdb", ",", "self", ")", ".", "is_skipped_module", "(", "module_name", ")" ]
Backport for https://bugs.python.org/issue36130. Fixed in Python 3.8+.
[ "Backport", "for", "https", ":", "//", "bugs", ".", "python", ".", "org", "/", "issue36130", "." ]
python
train
closeio/tasktiger
tasktiger/worker.py
https://github.com/closeio/tasktiger/blob/59f893152d6eb4b7f1f62fc4b35aeeca7f26c07a/tasktiger/worker.py#L122-L142
def _filter_queues(self, queues): """ Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a date. For example, "foo" will match both "foo" and "foo.bar". """ def match(queue): """ Returns whether the given queue should be included by checking each part of the queue name. """ for part in reversed_dotted_parts(queue): if part in self.exclude_queues: return False if part in self.only_queues: return True return not self.only_queues return [q for q in queues if match(q)]
[ "def", "_filter_queues", "(", "self", ",", "queues", ")", ":", "def", "match", "(", "queue", ")", ":", "\"\"\"\n Returns whether the given queue should be included by checking each\n part of the queue name.\n \"\"\"", "for", "part", "in", "reverse...
Applies the queue filter to the given list of queues and returns the queues that match. Note that a queue name matches any subqueues starting with the name, followed by a date. For example, "foo" will match both "foo" and "foo.bar".
[ "Applies", "the", "queue", "filter", "to", "the", "given", "list", "of", "queues", "and", "returns", "the", "queues", "that", "match", ".", "Note", "that", "a", "queue", "name", "matches", "any", "subqueues", "starting", "with", "the", "name", "followed", ...
python
train
cloud-custodian/cloud-custodian
c7n/reports/csvout.py
https://github.com/cloud-custodian/cloud-custodian/blob/52ef732eb3d7bc939d1579faf519314814695c08/c7n/reports/csvout.py#L238-L278
def record_set(session_factory, bucket, key_prefix, start_date, specify_hour=False): """Retrieve all s3 records for the given policy output url From the given start date. """ s3 = local_session(session_factory).client('s3') records = [] key_count = 0 date = start_date.strftime('%Y/%m/%d') if specify_hour: date += "/{}".format(start_date.hour) else: date += "/00" marker = "{}/{}/resources.json.gz".format(key_prefix.strip("/"), date) p = s3.get_paginator('list_objects_v2').paginate( Bucket=bucket, Prefix=key_prefix.strip('/') + '/', StartAfter=marker, ) with ThreadPoolExecutor(max_workers=20) as w: for key_set in p: if 'Contents' not in key_set: continue keys = [k for k in key_set['Contents'] if k['Key'].endswith('resources.json.gz')] key_count += len(keys) futures = map(lambda k: w.submit( get_records, bucket, k, session_factory), keys) for f in as_completed(futures): records.extend(f.result()) log.info("Fetched %d records across %d files" % ( len(records), key_count)) return records
[ "def", "record_set", "(", "session_factory", ",", "bucket", ",", "key_prefix", ",", "start_date", ",", "specify_hour", "=", "False", ")", ":", "s3", "=", "local_session", "(", "session_factory", ")", ".", "client", "(", "'s3'", ")", "records", "=", "[", "]...
Retrieve all s3 records for the given policy output url From the given start date.
[ "Retrieve", "all", "s3", "records", "for", "the", "given", "policy", "output", "url" ]
python
train
globality-corp/microcosm-postgres
microcosm_postgres/store.py
https://github.com/globality-corp/microcosm-postgres/blob/43dd793b1fc9b84e4056700f350e79e0df5ff501/microcosm_postgres/store.py#L139-L149
def replace(self, identifier, new_instance): """ Create or update a model. """ try: # Note that `self.update()` ultimately calls merge, which will not enforce # a strict replacement; absent fields will default to the current values. return self.update(identifier, new_instance) except ModelNotFoundError: return self.create(new_instance)
[ "def", "replace", "(", "self", ",", "identifier", ",", "new_instance", ")", ":", "try", ":", "# Note that `self.update()` ultimately calls merge, which will not enforce", "# a strict replacement; absent fields will default to the current values.", "return", "self", ".", "update", ...
Create or update a model.
[ "Create", "or", "update", "a", "model", "." ]
python
train
PythonCharmers/python-future
src/future/backports/email/_header_value_parser.py
https://github.com/PythonCharmers/python-future/blob/c423752879acc05eebc29b0bb9909327bd5c7308/src/future/backports/email/_header_value_parser.py#L2165-L2219
def get_mailbox_list(value): """ mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS]) For this routine we go outside the formal grammar in order to improve error handling. We recognize the end of the mailbox list only at the end of the value or at a ';' (the group terminator). This is so that we can turn invalid mailboxes into InvalidMailbox tokens and continue parsing any remaining valid mailboxes. We also allow all mailbox entries to be null, and this condition is handled appropriately at a higher level. """ mailbox_list = MailboxList() while value and value[0] != ';': try: token, value = get_mailbox(value) mailbox_list.append(token) except errors.HeaderParseError: leader = None if value[0] in CFWS_LEADER: leader, value = get_cfws(value) if not value or value[0] in ',;': mailbox_list.append(leader) mailbox_list.defects.append(errors.ObsoleteHeaderDefect( "empty element in mailbox-list")) else: token, value = get_invalid_mailbox(value, ',;') if leader is not None: token[:0] = [leader] mailbox_list.append(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( "invalid mailbox in mailbox-list")) elif value[0] == ',': mailbox_list.defects.append(errors.ObsoleteHeaderDefect( "empty element in mailbox-list")) else: token, value = get_invalid_mailbox(value, ',;') if leader is not None: token[:0] = [leader] mailbox_list.append(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( "invalid mailbox in mailbox-list")) if value and value[0] not in ',;': # Crap after mailbox; treat it as an invalid mailbox. # The mailbox info will still be available. mailbox = mailbox_list[-1] mailbox.token_type = 'invalid-mailbox' token, value = get_invalid_mailbox(value, ',;') mailbox.extend(token) mailbox_list.defects.append(errors.InvalidHeaderDefect( "invalid mailbox in mailbox-list")) if value and value[0] == ',': mailbox_list.append(ListSeparator) value = value[1:] return mailbox_list, value
[ "def", "get_mailbox_list", "(", "value", ")", ":", "mailbox_list", "=", "MailboxList", "(", ")", "while", "value", "and", "value", "[", "0", "]", "!=", "';'", ":", "try", ":", "token", ",", "value", "=", "get_mailbox", "(", "value", ")", "mailbox_list", ...
mailbox-list = (mailbox *("," mailbox)) / obs-mbox-list obs-mbox-list = *([CFWS] ",") mailbox *("," [mailbox / CFWS]) For this routine we go outside the formal grammar in order to improve error handling. We recognize the end of the mailbox list only at the end of the value or at a ';' (the group terminator). This is so that we can turn invalid mailboxes into InvalidMailbox tokens and continue parsing any remaining valid mailboxes. We also allow all mailbox entries to be null, and this condition is handled appropriately at a higher level.
[ "mailbox", "-", "list", "=", "(", "mailbox", "*", "(", "mailbox", "))", "/", "obs", "-", "mbox", "-", "list", "obs", "-", "mbox", "-", "list", "=", "*", "(", "[", "CFWS", "]", ")", "mailbox", "*", "(", "[", "mailbox", "/", "CFWS", "]", ")" ]
python
train
envi-idl/envipyarclib
envipyarclib/system.py
https://github.com/envi-idl/envipyarclib/blob/90135652510c3d53c5f51177252c1fea2639bf22/envipyarclib/system.py#L20-L25
def appdata_roaming_dir(): """Returns the roaming AppData directory for the installed ArcGIS Desktop.""" install = arcpy.GetInstallInfo('desktop') app_data = arcpy.GetSystemEnvironment("APPDATA") product_dir = ''.join((install['ProductName'], major_version())) return os.path.join(app_data, 'ESRI', product_dir)
[ "def", "appdata_roaming_dir", "(", ")", ":", "install", "=", "arcpy", ".", "GetInstallInfo", "(", "'desktop'", ")", "app_data", "=", "arcpy", ".", "GetSystemEnvironment", "(", "\"APPDATA\"", ")", "product_dir", "=", "''", ".", "join", "(", "(", "install", "[...
Returns the roaming AppData directory for the installed ArcGIS Desktop.
[ "Returns", "the", "roaming", "AppData", "directory", "for", "the", "installed", "ArcGIS", "Desktop", "." ]
python
train
saltstack/salt
salt/modules/wordpress.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/wordpress.py#L175-L225
def install(path, user, admin_user, admin_password, admin_email, title, url): ''' Run the initial setup functions for a wordpress install path path to wordpress install location user user to run the command as admin_user Username for the Administrative user for the wordpress install admin_password Initial Password for the Administrative user for the wordpress install admin_email Email for the Administrative user for the wordpress install title Title of the wordpress website for the wordpress install url Url for the wordpress install CLI Example: .. code-block:: bash salt '*' wordpress.install /var/www/html apache dwallace password123 \ dwallace@example.com "Daniel's Awesome Blog" https://blog.dwallace.com ''' retcode = __salt__['cmd.retcode'](( 'wp --path={0} core install ' '--title="{1}" ' '--admin_user={2} ' "--admin_password='{3}' " '--admin_email={4} ' '--url={5}' ).format( path, title, admin_user, admin_password, admin_email, url ), runas=user) if retcode == 0: return True return False
[ "def", "install", "(", "path", ",", "user", ",", "admin_user", ",", "admin_password", ",", "admin_email", ",", "title", ",", "url", ")", ":", "retcode", "=", "__salt__", "[", "'cmd.retcode'", "]", "(", "(", "'wp --path={0} core install '", "'--title=\"{1}\" '", ...
Run the initial setup functions for a wordpress install path path to wordpress install location user user to run the command as admin_user Username for the Administrative user for the wordpress install admin_password Initial Password for the Administrative user for the wordpress install admin_email Email for the Administrative user for the wordpress install title Title of the wordpress website for the wordpress install url Url for the wordpress install CLI Example: .. code-block:: bash salt '*' wordpress.install /var/www/html apache dwallace password123 \ dwallace@example.com "Daniel's Awesome Blog" https://blog.dwallace.com
[ "Run", "the", "initial", "setup", "functions", "for", "a", "wordpress", "install" ]
python
train
python-diamond/Diamond
src/diamond/server.py
https://github.com/python-diamond/Diamond/blob/0f3eb04327d6d3ed5e53a9967d6c9d2c09714a47/src/diamond/server.py#L64-L227
def run(self): """ Load handler and collector classes and then start collectors """ ####################################################################### # Config ####################################################################### self.config = load_config(self.configfile) collectors = load_collectors(self.config['server']['collectors_path']) metric_queue_size = int(self.config['server'].get('metric_queue_size', 16384)) self.metric_queue = self.manager.Queue(maxsize=metric_queue_size) self.log.debug('metric_queue_size: %d', metric_queue_size) ####################################################################### # Handlers # # TODO: Eventually move each handler to it's own process space? ####################################################################### if 'handlers_path' in self.config['server']: handlers_path = self.config['server']['handlers_path'] # Make an list if not one if isinstance(handlers_path, basestring): handlers_path = handlers_path.split(',') handlers_path = map(str.strip, handlers_path) self.config['server']['handlers_path'] = handlers_path load_include_path(handlers_path) if 'handlers' not in self.config['server']: self.log.critical('handlers missing from server section in config') sys.exit(1) handlers = self.config['server'].get('handlers') if isinstance(handlers, basestring): handlers = [handlers] # Prevent the Queue Handler from being a normal handler if 'diamond.handler.queue.QueueHandler' in handlers: handlers.remove('diamond.handler.queue.QueueHandler') self.handlers = load_handlers(self.config, handlers) QueueHandler = load_dynamic_class( 'diamond.handler.queue.QueueHandler', Handler ) self.handler_queue = QueueHandler( config=self.config, queue=self.metric_queue, log=self.log) handlers_process = multiprocessing.Process( name="Handlers", target=handler_process, args=(self.handlers, self.metric_queue, self.log), ) handlers_process.daemon = True handlers_process.start() 
####################################################################### # Signals ####################################################################### if hasattr(signal, 'SIGHUP'): signal.signal(signal.SIGHUP, signal_to_exception) ####################################################################### while True: try: active_children = multiprocessing.active_children() running_processes = [] for process in active_children: running_processes.append(process.name) running_processes = set(running_processes) ############################################################## # Collectors ############################################################## running_collectors = [] for collector, config in self.config['collectors'].iteritems(): if config.get('enabled', False) is not True: continue running_collectors.append(collector) running_collectors = set(running_collectors) # Collectors that are running but shouldn't be for process_name in running_processes - running_collectors: if 'Collector' not in process_name: continue for process in active_children: if process.name == process_name: process.terminate() collector_classes = dict( (cls.__name__.split('.')[-1], cls) for cls in collectors.values() ) load_delay = self.config['server'].get('collectors_load_delay', 1.0) for process_name in running_collectors - running_processes: # To handle running multiple collectors concurrently, we # split on white space and use the first word as the # collector name to spin collector_name = process_name.split()[0] if 'Collector' not in collector_name: continue if collector_name not in collector_classes: self.log.error('Can not find collector %s', collector_name) continue collector = initialize_collector( collector_classes[collector_name], name=process_name, configfile=self.configfile, handlers=[self.handler_queue]) if collector is None: self.log.error('Failed to load collector %s', process_name) continue # Splay the loads time.sleep(float(load_delay)) process = multiprocessing.Process( 
name=process_name, target=collector_process, args=(collector, self.metric_queue, self.log) ) process.daemon = True process.start() if not handlers_process.is_alive(): self.log.error('Handlers process exited') if (str_to_bool(self.config['server'].get( 'abort_on_handlers_process_exit', 'False'))): raise Exception('Handlers process exited') ############################################################## time.sleep(1) except SIGHUPException: # ignore further SIGHUPs for now original_sighup_handler = signal.getsignal(signal.SIGHUP) signal.signal(signal.SIGHUP, signal.SIG_IGN) self.log.info('Reloading state due to HUP') self.config = load_config(self.configfile) collectors = load_collectors( self.config['server']['collectors_path']) # restore SIGHUP handler signal.signal(signal.SIGHUP, original_sighup_handler)
[ "def", "run", "(", "self", ")", ":", "#######################################################################", "# Config", "#######################################################################", "self", ".", "config", "=", "load_config", "(", "self", ".", "configfile", ")", ...
Load handler and collector classes and then start collectors
[ "Load", "handler", "and", "collector", "classes", "and", "then", "start", "collectors" ]
python
train
arista-eosplus/pyeapi
pyeapi/client.py
https://github.com/arista-eosplus/pyeapi/blob/96a74faef1fe3bd79c4e900aed29c9956a0587d6/pyeapi/client.py#L155-L181
def autoload(self): """ Loads the eapi.conf file This method will use the module variable CONFIG_SEARCH_PATH to attempt to locate a valid eapi.conf file if a filename is not already configured. This method will load the first eapi.conf file it finds and then return. The CONFIG_SEARCH_PATH can be overridden using an environment variable by setting EAPI_CONF. """ path = list(CONFIG_SEARCH_PATH) if 'EAPI_CONF' in os.environ: path = os.environ['EAPI_CONF'] elif self.filename: path = self.filename path = make_iterable(path) for filename in path: filename = os.path.expanduser(filename) if os.path.exists(filename): self.filename = filename return self.read(filename) self._add_default_connection()
[ "def", "autoload", "(", "self", ")", ":", "path", "=", "list", "(", "CONFIG_SEARCH_PATH", ")", "if", "'EAPI_CONF'", "in", "os", ".", "environ", ":", "path", "=", "os", ".", "environ", "[", "'EAPI_CONF'", "]", "elif", "self", ".", "filename", ":", "path...
Loads the eapi.conf file This method will use the module variable CONFIG_SEARCH_PATH to attempt to locate a valid eapi.conf file if a filename is not already configured. This method will load the first eapi.conf file it finds and then return. The CONFIG_SEARCH_PATH can be overridden using an environment variable by setting EAPI_CONF.
[ "Loads", "the", "eapi", ".", "conf", "file" ]
python
train
line/line-bot-sdk-python
linebot/api.py
https://github.com/line/line-bot-sdk-python/blob/1b38bfc2497ff3e3c75be4b50e0f1b7425a07ce0/linebot/api.py#L235-L262
def get_group_member_ids(self, group_id, start=None, timeout=None): """Call get group member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str group_id: Group ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance """ params = None if start is None else {'start': start} response = self._get( '/v2/bot/group/{group_id}/members/ids'.format(group_id=group_id), params=params, timeout=timeout ) return MemberIds.new_from_json_dict(response.json)
[ "def", "get_group_member_ids", "(", "self", ",", "group_id", ",", "start", "=", "None", ",", "timeout", "=", "None", ")", ":", "params", "=", "None", "if", "start", "is", "None", "else", "{", "'start'", ":", "start", "}", "response", "=", "self", ".", ...
Call get group member IDs API. https://devdocs.line.me/en/#get-group-room-member-ids Gets the user IDs of the members of a group that the bot is in. This includes the user IDs of users who have not added the bot as a friend or has blocked the bot. :param str group_id: Group ID :param str start: continuationToken :param timeout: (optional) How long to wait for the server to send data before giving up, as a float, or a (connect timeout, read timeout) float tuple. Default is self.http_client.timeout :type timeout: float | tuple(float, float) :rtype: :py:class:`linebot.models.responses.MemberIds` :return: MemberIds instance
[ "Call", "get", "group", "member", "IDs", "API", "." ]
python
train
AndrewAnnex/SpiceyPy
spiceypy/spiceypy.py
https://github.com/AndrewAnnex/SpiceyPy/blob/fc20a9b9de68b58eed5b332f0c051fb343a6e335/spiceypy/spiceypy.py#L10564-L10594
def repmf(instr, marker, value, sigdig, informat, lenout=None): """ Replace a marker in a string with a formatted double precision value. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmf_c.html :param instr: Input string. :type instr: str :param marker: Marker to be replaced. :type marker: str :param value: Replacement value. :type value: float :param sigdig: Significant digits in replacement text. :type sigdig: int :param informat: Format 'E' or 'F'. :type informat: str :param lenout: Optional available space in output string. :type lenout: int :return: Output string. :rtype: str """ if lenout is None: lenout = ctypes.c_int(len(instr) + len(marker) + 15) instr = stypes.stringToCharP(instr) marker = stypes.stringToCharP(marker) value = ctypes.c_double(value) sigdig = ctypes.c_int(sigdig) informat = ctypes.c_char(informat.encode(encoding='UTF-8')) out = stypes.stringToCharP(lenout) libspice.repmf_c(instr, marker, value, sigdig, informat, lenout, out) return stypes.toPythonString(out)
[ "def", "repmf", "(", "instr", ",", "marker", ",", "value", ",", "sigdig", ",", "informat", ",", "lenout", "=", "None", ")", ":", "if", "lenout", "is", "None", ":", "lenout", "=", "ctypes", ".", "c_int", "(", "len", "(", "instr", ")", "+", "len", ...
Replace a marker in a string with a formatted double precision value. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/repmf_c.html :param instr: Input string. :type instr: str :param marker: Marker to be replaced. :type marker: str :param value: Replacement value. :type value: float :param sigdig: Significant digits in replacement text. :type sigdig: int :param informat: Format 'E' or 'F'. :type informat: str :param lenout: Optional available space in output string. :type lenout: int :return: Output string. :rtype: str
[ "Replace", "a", "marker", "in", "a", "string", "with", "a", "formatted", "double", "precision", "value", "." ]
python
train
chrisjrn/registrasion
registrasion/reporting/views.py
https://github.com/chrisjrn/registrasion/blob/461d5846c6f9f3b7099322a94f5d9911564448e4/registrasion/reporting/views.py#L569-L608
def attendee_list(request): ''' Returns a list of all attendees. ''' attendees = people.Attendee.objects.select_related( "attendeeprofilebase", "user", ) profiles = AttendeeProfile.objects.filter( attendee__in=attendees ).select_related( "attendee", "attendee__user", ) profiles_by_attendee = dict((i.attendee, i) for i in profiles) attendees = attendees.annotate( has_registered=Count( Q(user__invoice__status=commerce.Invoice.STATUS_PAID) ), ) headings = [ "User ID", "Name", "Email", "Has registered", ] data = [] for a in attendees: data.append([ a.user.id, (profiles_by_attendee[a].attendee_name() if a in profiles_by_attendee else ""), a.user.email, a.has_registered > 0, ]) # Sort by whether they've registered, then ID. data.sort(key=lambda a: (-a[3], a[0])) return AttendeeListReport("Attendees", headings, data, link_view=attendee)
[ "def", "attendee_list", "(", "request", ")", ":", "attendees", "=", "people", ".", "Attendee", ".", "objects", ".", "select_related", "(", "\"attendeeprofilebase\"", ",", "\"user\"", ",", ")", "profiles", "=", "AttendeeProfile", ".", "objects", ".", "filter", ...
Returns a list of all attendees.
[ "Returns", "a", "list", "of", "all", "attendees", "." ]
python
test
portfors-lab/sparkle
sparkle/gui/abstract_drag_view.py
https://github.com/portfors-lab/sparkle/blob/5fad1cf2bec58ec6b15d91da20f6236a74826110/sparkle/gui/abstract_drag_view.py#L153-L168
def dropEvent(self, event): """Handles an item being dropped onto view, calls dropped -- implemented by subclass """ super(AbstractDragView, self).dropEvent(event) self.dragStartPosition = None self.dragline = None self.originalPos = None data = event.mimeData() stream = data.retrieveData("application/x-protocol", QtCore.QVariant.ByteArray) item = cPickle.loads(str(stream.toByteArray())) self.dropped(item, event) event.accept()
[ "def", "dropEvent", "(", "self", ",", "event", ")", ":", "super", "(", "AbstractDragView", ",", "self", ")", ".", "dropEvent", "(", "event", ")", "self", ".", "dragStartPosition", "=", "None", "self", ".", "dragline", "=", "None", "self", ".", "originalP...
Handles an item being dropped onto view, calls dropped -- implemented by subclass
[ "Handles", "an", "item", "being", "dropped", "onto", "view", "calls", "dropped", "--", "implemented", "by", "subclass" ]
python
train
sernst/cauldron
cauldron/cli/commander.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/cli/commander.py#L35-L61
def fetch(reload: bool = False) -> dict: """ Returns a dictionary containing all of the available Cauldron commands currently registered. This data is cached for performance. Unless the reload argument is set to True, the command list will only be generated the first time this function is called. :param reload: Whether or not to disregard any cached command data and generate a new dictionary of available commands. :return: A dictionary where the keys are the name of the commands and the values are the modules for the command . """ if len(list(COMMANDS.keys())) > 0 and not reload: return COMMANDS COMMANDS.clear() for key in dir(commands): e = getattr(commands, key) if e and hasattr(e, 'NAME') and hasattr(e, 'DESCRIPTION'): COMMANDS[e.NAME] = e return dict(COMMANDS.items())
[ "def", "fetch", "(", "reload", ":", "bool", "=", "False", ")", "->", "dict", ":", "if", "len", "(", "list", "(", "COMMANDS", ".", "keys", "(", ")", ")", ")", ">", "0", "and", "not", "reload", ":", "return", "COMMANDS", "COMMANDS", ".", "clear", "...
Returns a dictionary containing all of the available Cauldron commands currently registered. This data is cached for performance. Unless the reload argument is set to True, the command list will only be generated the first time this function is called. :param reload: Whether or not to disregard any cached command data and generate a new dictionary of available commands. :return: A dictionary where the keys are the name of the commands and the values are the modules for the command .
[ "Returns", "a", "dictionary", "containing", "all", "of", "the", "available", "Cauldron", "commands", "currently", "registered", ".", "This", "data", "is", "cached", "for", "performance", ".", "Unless", "the", "reload", "argument", "is", "set", "to", "True", "t...
python
train
maas/python-libmaas
maas/client/viscera/__init__.py
https://github.com/maas/python-libmaas/blob/4092c68ef7fb1753efc843569848e2bcc3415002/maas/client/viscera/__init__.py#L96-L114
def dir_instance(inst): """Return a list of names available on `inst`. Eliminates names that bind to an `ObjectMethod` without a corresponding instance method; see `ObjectMethod`. """ # Skip instance attributes; __slots__ is automatically defined, and # descriptors are used to define attributes. Instead, go straight to class # attributes (including methods). for name, value in vars_class(type(inst)).items(): if isinstance(value, ObjectMethod): if value.has_instancemethod: yield name elif isinstance(value, Disabled): pass # Hide this; disabled. elif isinstance(value, (classmethod, staticmethod)): pass # Hide this; not interesting here. else: yield name
[ "def", "dir_instance", "(", "inst", ")", ":", "# Skip instance attributes; __slots__ is automatically defined, and", "# descriptors are used to define attributes. Instead, go straight to class", "# attributes (including methods).", "for", "name", ",", "value", "in", "vars_class", "(", ...
Return a list of names available on `inst`. Eliminates names that bind to an `ObjectMethod` without a corresponding instance method; see `ObjectMethod`.
[ "Return", "a", "list", "of", "names", "available", "on", "inst", "." ]
python
train
MillionIntegrals/vel
vel/rl/buffers/backend/circular_vec_buffer_backend.py
https://github.com/MillionIntegrals/vel/blob/e0726e1f63742b728966ccae0c8b825ea0ba491a/vel/rl/buffers/backend/circular_vec_buffer_backend.py#L207-L242
def get_transitions(self, indexes): """ Get dictionary of transition data """ assert indexes.shape[1] == self.state_buffer.shape[1], \ "Must have the same number of indexes as there are environments" frame_batch_shape = ( [indexes.shape[0], indexes.shape[1]] + list(self.state_buffer.shape[2:-1]) + [self.state_buffer.shape[-1] * self.frame_history] ) past_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) future_frame_buffer = np.zeros(frame_batch_shape, dtype=self.state_buffer.dtype) for buffer_idx, frame_row in enumerate(indexes): for env_idx, frame_idx in enumerate(frame_row): past_frame_buffer[buffer_idx, env_idx], future_frame_buffer[buffer_idx, env_idx] = ( self.get_frame_with_future(frame_idx, env_idx) ) actions = take_along_axis(self.action_buffer, indexes) rewards = take_along_axis(self.reward_buffer, indexes) dones = take_along_axis(self.dones_buffer, indexes) transition_tensors = { 'observations': past_frame_buffer, 'actions': actions, 'rewards': rewards, 'observations_next': future_frame_buffer, 'dones': dones.astype(np.float32), } for name in self.extra_data: transition_tensors[name] = take_along_axis(self.extra_data[name], indexes) return transition_tensors
[ "def", "get_transitions", "(", "self", ",", "indexes", ")", ":", "assert", "indexes", ".", "shape", "[", "1", "]", "==", "self", ".", "state_buffer", ".", "shape", "[", "1", "]", ",", "\"Must have the same number of indexes as there are environments\"", "frame_bat...
Get dictionary of transition data
[ "Get", "dictionary", "of", "transition", "data" ]
python
train
mwouts/jupytext
jupytext/pandoc.py
https://github.com/mwouts/jupytext/blob/eb7d6aee889f80ad779cfc53441c648f0db9246d/jupytext/pandoc.py#L44-L50
def pandoc_version(): """Pandoc's version number""" version = pandoc(u'--version').splitlines()[0].split()[1] if parse_version(version) < parse_version('2.7.2'): raise PandocError('Please install pandoc>=2.7.2 (found version {})'.format(version)) return version
[ "def", "pandoc_version", "(", ")", ":", "version", "=", "pandoc", "(", "u'--version'", ")", ".", "splitlines", "(", ")", "[", "0", "]", ".", "split", "(", ")", "[", "1", "]", "if", "parse_version", "(", "version", ")", "<", "parse_version", "(", "'2....
Pandoc's version number
[ "Pandoc", "s", "version", "number" ]
python
train
O365/python-o365
O365/utils/token.py
https://github.com/O365/python-o365/blob/02a71cf3775cc6a3c042e003365d6a07c8c75a73/O365/utils/token.py#L211-L221
def delete_token(self): """ Deletes the token from the store :return bool: Success / Failure """ try: self.doc_ref.delete() except Exception as e: log.error('Could not delete the token (key: {}): {}'.format(self.doc_id, str(e))) return False return True
[ "def", "delete_token", "(", "self", ")", ":", "try", ":", "self", ".", "doc_ref", ".", "delete", "(", ")", "except", "Exception", "as", "e", ":", "log", ".", "error", "(", "'Could not delete the token (key: {}): {}'", ".", "format", "(", "self", ".", "doc_...
Deletes the token from the store :return bool: Success / Failure
[ "Deletes", "the", "token", "from", "the", "store", ":", "return", "bool", ":", "Success", "/", "Failure" ]
python
train
spyder-ide/spyder
spyder/plugins/editor/panels/codefolding.py
https://github.com/spyder-ide/spyder/blob/f76836ce1b924bcc4efd3f74f2960d26a4e528e0/spyder/plugins/editor/panels/codefolding.py#L563-L579
def refresh_decorations(self, force=False): """ Refresh decorations colors. This function is called by the syntax highlighter when the style changed so that we may update our decorations colors according to the new style. """ cursor = self.editor.textCursor() if (self._prev_cursor is None or force or self._prev_cursor.blockNumber() != cursor.blockNumber()): for deco in self._block_decos: self.editor.decorations.remove(deco) for deco in self._block_decos: deco.set_outline(drift_color( self._get_scope_highlight_color(), 110)) deco.set_background(self._get_scope_highlight_color()) self.editor.decorations.add(deco) self._prev_cursor = cursor
[ "def", "refresh_decorations", "(", "self", ",", "force", "=", "False", ")", ":", "cursor", "=", "self", ".", "editor", ".", "textCursor", "(", ")", "if", "(", "self", ".", "_prev_cursor", "is", "None", "or", "force", "or", "self", ".", "_prev_cursor", ...
Refresh decorations colors. This function is called by the syntax highlighter when the style changed so that we may update our decorations colors according to the new style.
[ "Refresh", "decorations", "colors", ".", "This", "function", "is", "called", "by", "the", "syntax", "highlighter", "when", "the", "style", "changed", "so", "that", "we", "may", "update", "our", "decorations", "colors", "according", "to", "the", "new", "style",...
python
train
futapi/fut
fut/core.py
https://github.com/futapi/fut/blob/3792c9eee8f5884f38a02210e649c46c6c7a756d/fut/core.py#L1536-L1542
def packs(self): """List all (currently?) available packs.""" method = 'GET' url = 'store/purchaseGroup/cardpack' params = {'ppInfo': True} return self.__request__(method, url, params=params)
[ "def", "packs", "(", "self", ")", ":", "method", "=", "'GET'", "url", "=", "'store/purchaseGroup/cardpack'", "params", "=", "{", "'ppInfo'", ":", "True", "}", "return", "self", ".", "__request__", "(", "method", ",", "url", ",", "params", "=", "params", ...
List all (currently?) available packs.
[ "List", "all", "(", "currently?", ")", "available", "packs", "." ]
python
valid
python-openxml/python-docx
docx/styles/latent.py
https://github.com/python-openxml/python-docx/blob/6756f6cd145511d3eb6d1d188beea391b1ddfd53/docx/styles/latent.py#L40-L48
def add_latent_style(self, name): """ Return a newly added |_LatentStyle| object to override the inherited defaults defined in this latent styles object for the built-in style having *name*. """ lsdException = self._element.add_lsdException() lsdException.name = BabelFish.ui2internal(name) return _LatentStyle(lsdException)
[ "def", "add_latent_style", "(", "self", ",", "name", ")", ":", "lsdException", "=", "self", ".", "_element", ".", "add_lsdException", "(", ")", "lsdException", ".", "name", "=", "BabelFish", ".", "ui2internal", "(", "name", ")", "return", "_LatentStyle", "("...
Return a newly added |_LatentStyle| object to override the inherited defaults defined in this latent styles object for the built-in style having *name*.
[ "Return", "a", "newly", "added", "|_LatentStyle|", "object", "to", "override", "the", "inherited", "defaults", "defined", "in", "this", "latent", "styles", "object", "for", "the", "built", "-", "in", "style", "having", "*", "name", "*", "." ]
python
train
google/grr
grr/server/grr_response_server/flow_base.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/server/grr_response_server/flow_base.py#L496-L510
def Log(self, format_str, *args): """Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string """ log_entry = rdf_flow_objects.FlowLogEntry( client_id=self.rdf_flow.client_id, flow_id=self.rdf_flow.flow_id, hunt_id=self.rdf_flow.parent_hunt_id, message=format_str % args) data_store.REL_DB.WriteFlowLogEntries([log_entry]) if self.rdf_flow.parent_hunt_id: db_compat.ProcessHuntFlowLog(self.rdf_flow, format_str % args)
[ "def", "Log", "(", "self", ",", "format_str", ",", "*", "args", ")", ":", "log_entry", "=", "rdf_flow_objects", ".", "FlowLogEntry", "(", "client_id", "=", "self", ".", "rdf_flow", ".", "client_id", ",", "flow_id", "=", "self", ".", "rdf_flow", ".", "flo...
Logs the message using the flow's standard logging. Args: format_str: Format string *args: arguments to the format string
[ "Logs", "the", "message", "using", "the", "flow", "s", "standard", "logging", "." ]
python
train
inveniosoftware/invenio-oauthclient
invenio_oauthclient/models.py
https://github.com/inveniosoftware/invenio-oauthclient/blob/2500dc6935738107617aeade79e050d7608004bb/invenio_oauthclient/models.py#L166-L188
def get(cls, user_id, client_id, token_type='', access_token=None): """Get RemoteToken for user. :param user_id: The user id. :param client_id: The client id. :param token_type: The token type. (Default: ``''``) :param access_token: If set, will filter also by access token. (Default: ``None``) :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance. """ args = [ RemoteAccount.id == RemoteToken.id_remote_account, RemoteAccount.user_id == user_id, RemoteAccount.client_id == client_id, RemoteToken.token_type == token_type, ] if access_token: args.append(RemoteToken.access_token == access_token) return cls.query.options( db.joinedload('remote_account') ).filter(*args).first()
[ "def", "get", "(", "cls", ",", "user_id", ",", "client_id", ",", "token_type", "=", "''", ",", "access_token", "=", "None", ")", ":", "args", "=", "[", "RemoteAccount", ".", "id", "==", "RemoteToken", ".", "id_remote_account", ",", "RemoteAccount", ".", ...
Get RemoteToken for user. :param user_id: The user id. :param client_id: The client id. :param token_type: The token type. (Default: ``''``) :param access_token: If set, will filter also by access token. (Default: ``None``) :returns: A :class:`invenio_oauthclient.models.RemoteToken` instance.
[ "Get", "RemoteToken", "for", "user", "." ]
python
train
saltstack/salt
salt/transport/ipc.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/transport/ipc.py#L216-L226
def close(self): ''' Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks. ''' if self._closing: return self._closing = True if hasattr(self.sock, 'close'): self.sock.close()
[ "def", "close", "(", "self", ")", ":", "if", "self", ".", "_closing", ":", "return", "self", ".", "_closing", "=", "True", "if", "hasattr", "(", "self", ".", "sock", ",", "'close'", ")", ":", "self", ".", "sock", ".", "close", "(", ")" ]
Routines to handle any cleanup before the instance shuts down. Sockets and filehandles should be closed explicitly, to prevent leaks.
[ "Routines", "to", "handle", "any", "cleanup", "before", "the", "instance", "shuts", "down", ".", "Sockets", "and", "filehandles", "should", "be", "closed", "explicitly", "to", "prevent", "leaks", "." ]
python
train
brechtm/rinohtype
src/rinoh/backend/pdf/xobject/purepng.py
https://github.com/brechtm/rinohtype/blob/40a63c4e5ad7550f62b6860f1812cb67cafb9dc7/src/rinoh/backend/pdf/xobject/purepng.py#L2071-L2077
def read(self, n): """Read `n` chars from buffer""" r = self.buf[self.offset:self.offset + n] if isinstance(r, array): r = r.tostring() self.offset += n return r
[ "def", "read", "(", "self", ",", "n", ")", ":", "r", "=", "self", ".", "buf", "[", "self", ".", "offset", ":", "self", ".", "offset", "+", "n", "]", "if", "isinstance", "(", "r", ",", "array", ")", ":", "r", "=", "r", ".", "tostring", "(", ...
Read `n` chars from buffer
[ "Read", "n", "chars", "from", "buffer" ]
python
train
facetoe/zenpy
zenpy/lib/api_objects/chat_objects.py
https://github.com/facetoe/zenpy/blob/34c54c7e408b9ed01604ddf8b3422204c8bf31ea/zenpy/lib/api_objects/chat_objects.py#L435-L440
def zendesk_ticket(self): """ | Description: The ID of the Zendesk Support ticket created from this chat. Available only if using version 2 of the Zendesk Chat-Support integration """ if self.api and self.zendesk_ticket_id: return self.api._get_zendesk_ticket(self.zendesk_ticket_id)
[ "def", "zendesk_ticket", "(", "self", ")", ":", "if", "self", ".", "api", "and", "self", ".", "zendesk_ticket_id", ":", "return", "self", ".", "api", ".", "_get_zendesk_ticket", "(", "self", ".", "zendesk_ticket_id", ")" ]
| Description: The ID of the Zendesk Support ticket created from this chat. Available only if using version 2 of the Zendesk Chat-Support integration
[ "|", "Description", ":", "The", "ID", "of", "the", "Zendesk", "Support", "ticket", "created", "from", "this", "chat", ".", "Available", "only", "if", "using", "version", "2", "of", "the", "Zendesk", "Chat", "-", "Support", "integration" ]
python
train
ejeschke/ginga
ginga/util/wcs.py
https://github.com/ejeschke/ginga/blob/a78c893ec6f37a837de851947e9bb4625c597915/ginga/util/wcs.py#L553-L556
def deltaStarsRaDecDeg1(ra1_deg, dec1_deg, ra2_deg, dec2_deg): """Spherical triangulation.""" phi, dist = dispos(ra1_deg, dec1_deg, ra2_deg, dec2_deg) return arcsecToDeg(dist * 60.0)
[ "def", "deltaStarsRaDecDeg1", "(", "ra1_deg", ",", "dec1_deg", ",", "ra2_deg", ",", "dec2_deg", ")", ":", "phi", ",", "dist", "=", "dispos", "(", "ra1_deg", ",", "dec1_deg", ",", "ra2_deg", ",", "dec2_deg", ")", "return", "arcsecToDeg", "(", "dist", "*", ...
Spherical triangulation.
[ "Spherical", "triangulation", "." ]
python
train
saltstack/salt
salt/spm/__init__.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/spm/__init__.py#L886-L892
def _verbose(self, msg, level=log.debug): ''' Display verbose information ''' if self.opts.get('verbose', False) is True: self.ui.status(msg) level(msg)
[ "def", "_verbose", "(", "self", ",", "msg", ",", "level", "=", "log", ".", "debug", ")", ":", "if", "self", ".", "opts", ".", "get", "(", "'verbose'", ",", "False", ")", "is", "True", ":", "self", ".", "ui", ".", "status", "(", "msg", ")", "lev...
Display verbose information
[ "Display", "verbose", "information" ]
python
train
materialsproject/pymatgen
pymatgen/core/tensors.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/tensors.py#L917-L922
def get_uvec(vec): """ Gets a unit vector parallel to input vector""" l = np.linalg.norm(vec) if l < 1e-8: return vec return vec / l
[ "def", "get_uvec", "(", "vec", ")", ":", "l", "=", "np", ".", "linalg", ".", "norm", "(", "vec", ")", "if", "l", "<", "1e-8", ":", "return", "vec", "return", "vec", "/", "l" ]
Gets a unit vector parallel to input vector
[ "Gets", "a", "unit", "vector", "parallel", "to", "input", "vector" ]
python
train
wiheto/teneto
teneto/classes/bids.py
https://github.com/wiheto/teneto/blob/80d7a83a9adc1714589b020627c45bd5b66248ab/teneto/classes/bids.py#L360-L398
def make_functional_connectivity(self, njobs=None, returngroup=False, file_hdr=None, file_idx=None): """ Makes connectivity matrix for each of the subjects. Parameters ---------- returngroup : bool, default=False If true, returns the group average connectivity matrix. njobs : int How many parallel jobs to run file_idx : bool Default False, true if to ignore index column in loaded file. file_hdr : bool Default False, true if to ignore header row in loaded file. Returns ------- Saves data in derivatives/teneto_<version>/.../fc/ R_group : array if returngroup is true, the average connectivity matrix is returned. """ if not njobs: njobs = self.njobs self.add_history(inspect.stack()[0][3], locals(), 1) files = self.get_selected_files(quiet=1) R_group = [] with ProcessPoolExecutor(max_workers=njobs) as executor: job = {executor.submit( self._run_make_functional_connectivity, f, file_hdr, file_idx) for f in files} for j in as_completed(job): R_group.append(j.result()) if returngroup: # Fisher tranform -> mean -> inverse fisher tranform R_group = np.tanh(np.mean(np.arctanh(np.array(R_group)), axis=0)) return np.array(R_group)
[ "def", "make_functional_connectivity", "(", "self", ",", "njobs", "=", "None", ",", "returngroup", "=", "False", ",", "file_hdr", "=", "None", ",", "file_idx", "=", "None", ")", ":", "if", "not", "njobs", ":", "njobs", "=", "self", ".", "njobs", "self", ...
Makes connectivity matrix for each of the subjects. Parameters ---------- returngroup : bool, default=False If true, returns the group average connectivity matrix. njobs : int How many parallel jobs to run file_idx : bool Default False, true if to ignore index column in loaded file. file_hdr : bool Default False, true if to ignore header row in loaded file. Returns ------- Saves data in derivatives/teneto_<version>/.../fc/ R_group : array if returngroup is true, the average connectivity matrix is returned.
[ "Makes", "connectivity", "matrix", "for", "each", "of", "the", "subjects", "." ]
python
train
abiiranathan/db2
db2/session.py
https://github.com/abiiranathan/db2/blob/347319e421921517bcae7639f524c3c3eb5446e6/db2/session.py#L297-L304
def exists(self, model_class, ID): '''Check if a record of id==ID exists in table model_class.__name__.lower()''' assert hasattr(model_class, '_fields'), 'Not a valid model class' res = self.get(model_class, id=ID, fetchOne=True) if res: return True return False
[ "def", "exists", "(", "self", ",", "model_class", ",", "ID", ")", ":", "assert", "hasattr", "(", "model_class", ",", "'_fields'", ")", ",", "'Not a valid model class'", "res", "=", "self", ".", "get", "(", "model_class", ",", "id", "=", "ID", ",", "fetch...
Check if a record of id==ID exists in table model_class.__name__.lower()
[ "Check", "if", "a", "record", "of", "id", "==", "ID", "exists", "in", "table", "model_class", ".", "__name__", ".", "lower", "()" ]
python
train
gwpy/gwpy
gwpy/timeseries/timeseries.py
https://github.com/gwpy/gwpy/blob/7a92b917e7dd2d99b15895293a1fa1d66cdb210a/gwpy/timeseries/timeseries.py#L447-L499
def spectrogram2(self, fftlength, overlap=None, window='hann', **kwargs): """Calculate the non-averaged power `Spectrogram` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats scaling : [ 'density' | 'spectrum' ], optional selects between computing the power spectral density ('density') where the `Spectrogram` has units of V**2/Hz if the input is measured in V and computing the power spectrum ('spectrum') where the `Spectrogram` has units of V**2 if the input is measured in V. Defaults to 'density'. **kwargs other parameters to be passed to `scipy.signal.periodogram` for each column of the `Spectrogram` Returns ------- spectrogram: `~gwpy.spectrogram.Spectrogram` a power `Spectrogram` with `1/fftlength` frequency resolution and (fftlength - overlap) time resolution. See also -------- scipy.signal.periodogram for documentation on the Fourier methods used in this calculation Notes ----- This method calculates overlapping periodograms for all possible chunks of data entirely containing within the span of the input `TimeSeries`, then normalises the power in overlapping chunks using a triangular window centred on that chunk which most overlaps the given `Spectrogram` time sample. """ # set kwargs for periodogram() kwargs.setdefault('fs', self.sample_rate.to('Hz').value) # run return spectral.spectrogram(self, signal.periodogram, fftlength=fftlength, overlap=overlap, window=window, **kwargs)
[ "def", "spectrogram2", "(", "self", ",", "fftlength", ",", "overlap", "=", "None", ",", "window", "=", "'hann'", ",", "*", "*", "kwargs", ")", ":", "# set kwargs for periodogram()", "kwargs", ".", "setdefault", "(", "'fs'", ",", "self", ".", "sample_rate", ...
Calculate the non-averaged power `Spectrogram` of this `TimeSeries` Parameters ---------- fftlength : `float` number of seconds in single FFT. overlap : `float`, optional number of seconds of overlap between FFTs, defaults to the recommended overlap for the given window (if given), or 0 window : `str`, `numpy.ndarray`, optional window function to apply to timeseries prior to FFT, see :func:`scipy.signal.get_window` for details on acceptable formats scaling : [ 'density' | 'spectrum' ], optional selects between computing the power spectral density ('density') where the `Spectrogram` has units of V**2/Hz if the input is measured in V and computing the power spectrum ('spectrum') where the `Spectrogram` has units of V**2 if the input is measured in V. Defaults to 'density'. **kwargs other parameters to be passed to `scipy.signal.periodogram` for each column of the `Spectrogram` Returns ------- spectrogram: `~gwpy.spectrogram.Spectrogram` a power `Spectrogram` with `1/fftlength` frequency resolution and (fftlength - overlap) time resolution. See also -------- scipy.signal.periodogram for documentation on the Fourier methods used in this calculation Notes ----- This method calculates overlapping periodograms for all possible chunks of data entirely containing within the span of the input `TimeSeries`, then normalises the power in overlapping chunks using a triangular window centred on that chunk which most overlaps the given `Spectrogram` time sample.
[ "Calculate", "the", "non", "-", "averaged", "power", "Spectrogram", "of", "this", "TimeSeries" ]
python
train
gwastro/pycbc
pycbc/conversions.py
https://github.com/gwastro/pycbc/blob/7a64cdd104d263f1b6ea0b01e6841837d05a4cb3/pycbc/conversions.py#L443-L447
def phi_s(spin1x, spin1y, spin2x, spin2y): """ Returns the sum of the in-plane perpendicular spins.""" phi1 = phi_from_spinx_spiny(spin1x, spin1y) phi2 = phi_from_spinx_spiny(spin2x, spin2y) return (phi1 + phi2) % (2 * numpy.pi)
[ "def", "phi_s", "(", "spin1x", ",", "spin1y", ",", "spin2x", ",", "spin2y", ")", ":", "phi1", "=", "phi_from_spinx_spiny", "(", "spin1x", ",", "spin1y", ")", "phi2", "=", "phi_from_spinx_spiny", "(", "spin2x", ",", "spin2y", ")", "return", "(", "phi1", "...
Returns the sum of the in-plane perpendicular spins.
[ "Returns", "the", "sum", "of", "the", "in", "-", "plane", "perpendicular", "spins", "." ]
python
train
sananth12/ImageScraper
image_scraper/utils.py
https://github.com/sananth12/ImageScraper/blob/04cdefaa184420637d02b5a285cf407bbd428929/image_scraper/utils.py#L138-L167
def get_img_list(self): """ Gets list of images from the page_html. """ tree = html.fromstring(self.page_html) img = tree.xpath('//img/@src') links = tree.xpath('//a/@href') img_list = self.process_links(img) img_links = self.process_links(links) img_list.extend(img_links) if self.filename_pattern: # Compile pattern for efficiency pattern = re.compile(self.filename_pattern) # Verifies filename in the image URL matches pattern def matches_pattern(img_url): """ Function to check if pattern is matched. """ img_filename = urlparse(img_url).path.split('/')[-1] return pattern.search(img_filename) images = [urljoin(self.url, img_url) for img_url in img_list if matches_pattern(img_url)] else: images = [urljoin(self.url, img_url) for img_url in img_list] images = list(set(images)) self.images = images if self.scrape_reverse: self.images.reverse() return self.images
[ "def", "get_img_list", "(", "self", ")", ":", "tree", "=", "html", ".", "fromstring", "(", "self", ".", "page_html", ")", "img", "=", "tree", ".", "xpath", "(", "'//img/@src'", ")", "links", "=", "tree", ".", "xpath", "(", "'//a/@href'", ")", "img_list...
Gets list of images from the page_html.
[ "Gets", "list", "of", "images", "from", "the", "page_html", "." ]
python
train
istresearch/scrapy-cluster
utils/scutils/zookeeper_watcher.py
https://github.com/istresearch/scrapy-cluster/blob/13aaed2349af5d792d6bcbfcadc5563158aeb599/utils/scutils/zookeeper_watcher.py#L133-L146
def state_listener(self, state): ''' Restarts the session if we get anything besides CONNECTED ''' if state == KazooState.SUSPENDED: self.set_valid(False) self.call_error(self.BAD_CONNECTION) elif state == KazooState.LOST and not self.do_not_restart: self.threaded_start() elif state == KazooState.CONNECTED: # This is going to throw a SUSPENDED kazoo error # which will cause the sessions to be wiped and re established. # Used b/c of massive connection pool issues self.zoo_client.stop()
[ "def", "state_listener", "(", "self", ",", "state", ")", ":", "if", "state", "==", "KazooState", ".", "SUSPENDED", ":", "self", ".", "set_valid", "(", "False", ")", "self", ".", "call_error", "(", "self", ".", "BAD_CONNECTION", ")", "elif", "state", "=="...
Restarts the session if we get anything besides CONNECTED
[ "Restarts", "the", "session", "if", "we", "get", "anything", "besides", "CONNECTED" ]
python
train
guaix-ucm/numina
numina/store/gtc/load.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/store/gtc/load.py#L20-L59
def process_node(node): """Process a node in result.json structure""" value = node['value'] mname = node['name'] typeid = node['typeid'] if typeid == 52: # StructDataValue obj = {} for el in value['elements']: key, val = process_node(el) obj[key] = val if value['struct_type'] != 'dict': # Value is not a dict klass = objimp.import_object(value['struct_type']) newobj = klass.__new__(klass) if hasattr(newobj, '__setstate__'): newobj.__setstate__(obj) else: newobj.__dict__ = obj obj = newobj elif typeid == 9: data = value['data'] dim = value['dimension'] shape = dim['height'], dim['width'] obj = data elif typeid == 90: # StructDataValueList obj = [] for el in value: sobj = {} for sel in el['elements']: key, val = process_node(sel) sobj[key] = val obj.append(sobj) elif typeid == 45: # Frame obj = dataframe.DataFrame(frame=os.path.abspath(value['path'])) else: obj = value return mname, obj
[ "def", "process_node", "(", "node", ")", ":", "value", "=", "node", "[", "'value'", "]", "mname", "=", "node", "[", "'name'", "]", "typeid", "=", "node", "[", "'typeid'", "]", "if", "typeid", "==", "52", ":", "# StructDataValue", "obj", "=", "{", "}"...
Process a node in result.json structure
[ "Process", "a", "node", "in", "result", ".", "json", "structure" ]
python
train
leonardt/fault
fault/utils.py
https://github.com/leonardt/fault/blob/da1b48ab727bd85abc54ae9b52841d08188c0df5/fault/utils.py#L7-L67
def get_short_lambda_body_text(lambda_func): """Return the source of a (short) lambda function. If it's impossible to obtain, returns None. """ try: source_lines, _ = inspect.getsourcelines(lambda_func) except (IOError, TypeError): return None # skip `def`-ed functions and long lambdas if len(source_lines) != 1: return None source_text = os.linesep.join(source_lines).strip() # find the AST node of a lambda definition # so we can locate it in the source code source_ast = ast.parse(source_text) lambda_node = next((node for node in ast.walk(source_ast) if isinstance(node, ast.Lambda)), None) if lambda_node is None: # could be a single line `def fn(x): ...` return None # HACK: Since we can (and most likely will) get source lines # where lambdas are just a part of bigger expressions, they will have # some trailing junk after their definition. # # Unfortunately, AST nodes only keep their _starting_ offsets # from the original source, so we have to determine the end ourselves. # We do that by gradually shaving extra junk from after the definition. lambda_text = source_text[lambda_node.col_offset:] lambda_body_text = source_text[lambda_node.body.col_offset:] min_length = len('lambda:_') # shortest possible lambda expression while len(lambda_text) > min_length: try: # What's annoying is that sometimes the junk even parses, # but results in a *different* lambda. You'd probably have to # be deliberately malicious to exploit it but here's one way: # # bloop = lambda x: False, lambda x: True # get_short_lamnda_source(bloop[0]) # # Ideally, we'd just keep shaving until we get the same code, # but that most likely won't happen because we can't replicate # the exact closure environment. code = compile(lambda_body_text, '<unused filename>', 'eval') # Thus the next best thing is to assume some divergence due # to e.g. LOAD_GLOBAL in original code being LOAD_FAST in # the one compiled above, or vice versa. 
# But the resulting code should at least be the same *length* # if otherwise the same operations are performed in it. if len(code.co_code) == len(lambda_func.__code__.co_code): # return lambda_text return lambda_body_text except SyntaxError: pass lambda_text = lambda_text[:-1] lambda_body_text = lambda_body_text[:-1] return None
[ "def", "get_short_lambda_body_text", "(", "lambda_func", ")", ":", "try", ":", "source_lines", ",", "_", "=", "inspect", ".", "getsourcelines", "(", "lambda_func", ")", "except", "(", "IOError", ",", "TypeError", ")", ":", "return", "None", "# skip `def`-ed func...
Return the source of a (short) lambda function. If it's impossible to obtain, returns None.
[ "Return", "the", "source", "of", "a", "(", "short", ")", "lambda", "function", ".", "If", "it", "s", "impossible", "to", "obtain", "returns", "None", "." ]
python
train
fabioz/PyDev.Debugger
pydevd_attach_to_process/winappdbg/textio.py
https://github.com/fabioz/PyDev.Debugger/blob/ed9c4307662a5593b8a7f1f3389ecd0e79b8c503/pydevd_attach_to_process/winappdbg/textio.py#L1025-L1029
def bk_dark(cls): "Make the current background color dark." wAttributes = cls._get_text_attributes() wAttributes &= ~win32.BACKGROUND_INTENSITY cls._set_text_attributes(wAttributes)
[ "def", "bk_dark", "(", "cls", ")", ":", "wAttributes", "=", "cls", ".", "_get_text_attributes", "(", ")", "wAttributes", "&=", "~", "win32", ".", "BACKGROUND_INTENSITY", "cls", ".", "_set_text_attributes", "(", "wAttributes", ")" ]
Make the current background color dark.
[ "Make", "the", "current", "background", "color", "dark", "." ]
python
train
quantumlib/Cirq
cirq/experiments/google_v2_supremacy_circuit.py
https://github.com/quantumlib/Cirq/blob/0827da80dd7880e5b923eb69407e980ed9bc0bd2/cirq/experiments/google_v2_supremacy_circuit.py#L109-L149
def generate_supremacy_circuit_google_v2_bristlecone(n_rows: int, cz_depth: int, seed: int ) -> circuits.Circuit: """ Generates Google Random Circuits v2 in Bristlecone. See also https://arxiv.org/abs/1807.10749 Args: n_rows: number of rows in a Bristlecone lattice. Note that we do not include single qubit corners. cz_depth: number of layers with CZ gates. seed: seed for the random instance. Returns: A circuit with given size and seed. """ def get_qubits(n_rows): def count_neighbors(qubits, qubit): """Counts the qubits that the given qubit can interact with.""" possibles = [ devices.GridQubit(qubit.row + 1, qubit.col), devices.GridQubit(qubit.row - 1, qubit.col), devices.GridQubit(qubit.row, qubit.col + 1), devices.GridQubit(qubit.row, qubit.col - 1), ] return len(list(e for e in possibles if e in qubits)) assert 1 <= n_rows <= 11 max_row = n_rows - 1 dev = google.Bristlecone # we need a consistent order of qubits qubits = list(dev.qubits) qubits.sort() qubits = [q for q in qubits if q.row <= max_row and q.row + q.col < n_rows + 6 and q.row - q.col < n_rows - 5] qubits = [q for q in qubits if count_neighbors(qubits, q) > 1] return qubits qubits = get_qubits(n_rows) return generate_supremacy_circuit_google_v2(qubits, cz_depth, seed)
[ "def", "generate_supremacy_circuit_google_v2_bristlecone", "(", "n_rows", ":", "int", ",", "cz_depth", ":", "int", ",", "seed", ":", "int", ")", "->", "circuits", ".", "Circuit", ":", "def", "get_qubits", "(", "n_rows", ")", ":", "def", "count_neighbors", "(",...
Generates Google Random Circuits v2 in Bristlecone. See also https://arxiv.org/abs/1807.10749 Args: n_rows: number of rows in a Bristlecone lattice. Note that we do not include single qubit corners. cz_depth: number of layers with CZ gates. seed: seed for the random instance. Returns: A circuit with given size and seed.
[ "Generates", "Google", "Random", "Circuits", "v2", "in", "Bristlecone", ".", "See", "also", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1807", ".", "10749" ]
python
train
cirruscluster/cirruscluster
cirruscluster/ext/ansible/runner/action_plugins/template.py
https://github.com/cirruscluster/cirruscluster/blob/977409929dd81322d886425cdced10608117d5d7/cirruscluster/ext/ansible/runner/action_plugins/template.py#L29-L80
def run(self, conn, tmp, module_name, module_args, inject): ''' handler for template operations ''' if not self.runner.is_playbook: raise errors.AnsibleError("in current versions of ansible, templates are only usable in playbooks") # load up options options = utils.parse_kv(module_args) source = options.get('src', None) dest = options.get('dest', None) if (source is None and 'first_available_file' not in inject) or dest is None: result = dict(failed=True, msg="src and dest are required") return ReturnData(conn=conn, comm_ok=False, result=result) # if we have first_available_file in our vars # look up the files and use the first one we find as src if 'first_available_file' in inject: found = False for fn in self.runner.module_vars.get('first_available_file'): fnt = utils.template(self.runner.basedir, fn, inject) fnd = utils.path_dwim(self.runner.basedir, fnt) if os.path.exists(fnd): source = fnt found = True break if not found: result = dict(failed=True, msg="could not find src in first_available_file list") return ReturnData(conn=conn, comm_ok=False, result=result) else: source = utils.template(self.runner.basedir, source, inject) if dest.endswith("/"): base = os.path.basename(source) dest = os.path.join(dest, base) # template the source data locally & transfer try: resultant = utils.template_from_file(self.runner.basedir, source, inject) except Exception, e: result = dict(failed=True, msg=str(e)) return ReturnData(conn=conn, comm_ok=False, result=result) xfered = self.runner._transfer_str(conn, tmp, 'source', resultant) # fix file permissions when the copy is done as a different user if self.runner.sudo and self.runner.sudo_user != 'root': self.runner._low_level_exec_command(conn, "chmod a+r %s" % xfered, tmp) # run the copy module module_args = "%s src=%s dest=%s" % (module_args, xfered, dest) return self.runner._execute_module(conn, tmp, 'copy', module_args, inject=inject)
[ "def", "run", "(", "self", ",", "conn", ",", "tmp", ",", "module_name", ",", "module_args", ",", "inject", ")", ":", "if", "not", "self", ".", "runner", ".", "is_playbook", ":", "raise", "errors", ".", "AnsibleError", "(", "\"in current versions of ansible, ...
handler for template operations
[ "handler", "for", "template", "operations" ]
python
train
iterative/dvc
dvc/command/run.py
https://github.com/iterative/dvc/blob/8bb21261e34c9632453e09090de7ebe50e38d341/dvc/command/run.py#L61-L74
def _parsed_cmd(self): """ We need to take into account two cases: - ['python code.py foo bar']: Used mainly with dvc as a library - ['echo', 'foo bar']: List of arguments received from the CLI The second case would need quoting, as it was passed through: dvc run echo "foo bar" """ if len(self.args.command) < 2: return " ".join(self.args.command) return " ".join(self._quote_argument(arg) for arg in self.args.command)
[ "def", "_parsed_cmd", "(", "self", ")", ":", "if", "len", "(", "self", ".", "args", ".", "command", ")", "<", "2", ":", "return", "\" \"", ".", "join", "(", "self", ".", "args", ".", "command", ")", "return", "\" \"", ".", "join", "(", "self", "....
We need to take into account two cases: - ['python code.py foo bar']: Used mainly with dvc as a library - ['echo', 'foo bar']: List of arguments received from the CLI The second case would need quoting, as it was passed through: dvc run echo "foo bar"
[ "We", "need", "to", "take", "into", "account", "two", "cases", ":" ]
python
train