repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
angr/claripy
claripy/strings.py
https://github.com/angr/claripy/blob/4ed61924880af1ea8fb778047d896ec0156412a6/claripy/strings.py#L79-L89
def StrPrefixOf(prefix, input_string):
    """
    Return True if the concrete value of the input_string starts with prefix
    otherwise false.

    :param prefix: prefix we want to check
    :param input_string: the string we want to check
    :return: True if the input_string starts with prefix else false
    """
    # Bug fix: the previous implementation used
    # re.match(r'^' + prefix.value, input_string.value), which treated the
    # prefix as a regular expression. Any regex metacharacter in the prefix
    # ('.', '*', '(', '+', ...) would then match the wrong strings. A prefix
    # test is a literal comparison, so use str.startswith instead.
    return input_string.value.startswith(prefix.value)
[ "def", "StrPrefixOf", "(", "prefix", ",", "input_string", ")", ":", "return", "re", ".", "match", "(", "r'^'", "+", "prefix", ".", "value", ",", "input_string", ".", "value", ")", "is", "not", "None" ]
Return True if the concrete value of the input_string starts with prefix otherwise false. :param prefix: prefix we want to check :param input_string: the string we want to check :return: True if the input_string starts with prefix else false
[ "Return", "True", "if", "the", "concrete", "value", "of", "the", "input_string", "starts", "with", "prefix", "otherwise", "false", "." ]
python
train
flo-compbio/genometools
genometools/ensembl/annotations.py
https://github.com/flo-compbio/genometools/blob/dd962bb26d60a0f14ca14d8c9a4dd75768962c7d/genometools/ensembl/annotations.py#L451-L471
def get_linc_rna_genes(
        path_or_buffer, remove_duplicates=True, **kwargs):
    r"""Get list of all lincRNA genes based on Ensembl GTF file.

    (Docstring fix: this previously said "protein-coding genes", a
    copy/paste error — the function filters on the 'lincRNA' biotype.)

    Parameters
    ----------
    See :func:`get_genes` function.

    Returns
    -------
    `pandas.DataFrame`
        Table with rows corresponding to lincRNA genes.
    """
    # Only the lincRNA biotype is kept; everything else is delegated
    # to the generic get_genes parser.
    valid_biotypes = set(['lincRNA'])
    df = get_genes(path_or_buffer, valid_biotypes,
                   remove_duplicates=remove_duplicates, **kwargs)
    return df
[ "def", "get_linc_rna_genes", "(", "path_or_buffer", ",", "remove_duplicates", "=", "True", ",", "*", "*", "kwargs", ")", ":", "valid_biotypes", "=", "set", "(", "[", "'lincRNA'", "]", ")", "df", "=", "get_genes", "(", "path_or_buffer", ",", "valid_biotypes", ...
r"""Get list of all lincRNA genes based on Ensembl GTF file. Parameters ---------- See :func:`get_genes` function. Returns ------- `pandas.DataFrame` Table with rows corresponding to lincRNA genes.
[ "r", "Get", "list", "of", "all", "protein", "-", "coding", "genes", "based", "on", "Ensembl", "GTF", "file", ".", "Parameters", "----------", "See", ":", "func", ":", "get_genes", "function", "." ]
python
train
Microsoft/botbuilder-python
libraries/botbuilder-core/botbuilder/core/card_factory.py
https://github.com/Microsoft/botbuilder-python/blob/274663dd91c811bae6ac4488915ba5880771b0a7/libraries/botbuilder-core/botbuilder/core/card_factory.py#L147-L158
def video_card(card: VideoCard) -> Attachment:
    """
    Build an Attachment wrapping the given video card.

    Will raise a TypeError if 'card' argument is not a VideoCard.

    :param card:
    :return:
    """
    # Accept only genuine VideoCard instances; anything else is a caller bug.
    if isinstance(card, VideoCard):
        return Attachment(content_type=CardFactory.content_types.video_card,
                          content=card)
    raise TypeError('CardFactory.video_card(): `card` argument is not an instance of an VideoCard, '
                    'unable to prepare attachment.')
[ "def", "video_card", "(", "card", ":", "VideoCard", ")", "->", "Attachment", ":", "if", "not", "isinstance", "(", "card", ",", "VideoCard", ")", ":", "raise", "TypeError", "(", "'CardFactory.video_card(): `card` argument is not an instance of an VideoCard, '", "'unable ...
Returns an attachment for a video card. Will raise a TypeError if 'card' argument is not a VideoCard. :param card: :return:
[ "Returns", "an", "attachment", "for", "a", "video", "card", ".", "Will", "raise", "a", "TypeError", "if", "card", "argument", "is", "not", "a", "VideoCard", ".", ":", "param", "card", ":", ":", "return", ":" ]
python
test
romankoblov/leaf
leaf/__init__.py
https://github.com/romankoblov/leaf/blob/e042d91ec462c834318d03f199fcc4a9f565cb84/leaf/__init__.py#L130-L142
def strip_accents(s, pass_symbols=(u'й', u'Й', u'\n')):
    """ Strip accents from a string """
    # Characters listed in pass_symbols are copied through untouched;
    # every other character is NFD-decomposed and its combining marks
    # (Unicode category 'Mn') are dropped.
    out = []
    for ch in s:
        if ch in pass_symbols:
            out.append(ch)
        else:
            out.extend(c for c in unicodedata.normalize('NFD', ch)
                       if unicodedata.category(c) != 'Mn')
    return ''.join(out)
[ "def", "strip_accents", "(", "s", ",", "pass_symbols", "=", "(", "u'й',", " ", "'Й', ", "u", "\\n'))", ":", "", "", "result", "=", "[", "]", "for", "char", "in", "s", ":", "# Pass these symbols without processing", "if", "char", "in", "pass_symbols", ":", ...
Strip accents from a string
[ "Strip", "accents", "from", "a", "string" ]
python
train
rodluger/everest
everest/search.py
https://github.com/rodluger/everest/blob/6779591f9f8b3556847e2fbf761bdfac7520eaea/everest/search.py#L29-L131
def Search(star, pos_tol=2.5, neg_tol=50., **ps_kwargs):
    '''
    NOTE: `pos_tol` is the positive (i.e., above the median) outlier
          tolerance in standard deviations.

    NOTE: `neg_tol` is the negative (i.e., below the median) outlier
          tolerance in standard deviations.
    '''
    # Smooth the light curve
    # (masked cadences are removed before smoothing; assumes star.nanmask
    # and star.badmask are index arrays into star.time — TODO confirm)
    t = np.delete(star.time, np.concatenate([star.nanmask, star.badmask]))
    f = np.delete(star.flux, np.concatenate([star.nanmask, star.badmask]))
    f = SavGol(f)
    med = np.nanmedian(f)
    # Kill positive outliers
    # 1.4826 scales the median absolute deviation to a Gaussian sigma.
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    pos_inds = np.where((f > med + pos_tol * MAD))[0]
    # Map indices from the trimmed array back to indices in star.time.
    pos_inds = np.array([np.argmax(star.time == t[i]) for i in pos_inds])
    # Kill negative outliers
    MAD = 1.4826 * np.nanmedian(np.abs(f - med))
    neg_inds = np.where((f < med - neg_tol * MAD))[0]
    neg_inds = np.array([np.argmax(star.time == t[i]) for i in neg_inds])
    # Replace the star.outmask array
    star.outmask = np.concatenate([neg_inds, pos_inds])
    star.transitmask = np.array([], dtype=int)
    # Delta chi squared accumulators, one entry per trial cadence.
    TIME = np.array([])
    DEPTH = np.array([])
    VARDEPTH = np.array([])
    DELCHISQ = np.array([])
    for b, brkpt in enumerate(star.breakpoints):
        # Log
        log.info('Running chunk %d/%d...' % (b + 1, len(star.breakpoints)))
        # Masks for current chunk
        m = star.get_masked_chunk(b, pad=False)
        # This block of the masked covariance matrix
        K = GetCovariance(star.kernel, star.kernel_params,
                          star.time[m], star.fraw_err[m])
        # The masked X.L.X^T term
        A = np.zeros((len(m), len(m)))
        for n in range(star.pld_order):
            XM = star.X(n, m)
            A += star.lam[b][n] * np.dot(XM, XM.T)
        K += A
        # Cholesky factorization is reused for every solve below.
        CDK = cho_factor(K)
        # Baseline (log-likelihood of the null, transit-free model)
        med = np.nanmedian(star.fraw[m])
        lnL0 = -0.5 * np.dot(star.fraw[m], cho_solve(CDK, star.fraw[m]))
        dt = np.median(np.diff(star.time[m]))
        # Create a uniform time array and get indices of missing cadences
        tol = np.nanmedian(np.diff(star.time[m])) / 5.
        tunif = np.arange(star.time[m][0], star.time[m][-1] + tol, dt)
        tnogaps = np.array(tunif)
        gaps = []
        j = 0
        for i, t in enumerate(tunif):
            if np.abs(star.time[m][j] - t) < tol:
                # Snap the uniform grid point to the real cadence time.
                tnogaps[i] = star.time[m][j]
                j += 1
                if j == len(star.time[m]):
                    break
            else:
                gaps.append(i)
        gaps = np.array(gaps, dtype=int)
        # Compute the normalized transit model for a single transit
        transit_model = TransitShape(**ps_kwargs)
        # Now roll the transit model across each cadence
        dchisq = np.zeros(len(tnogaps))
        d = np.zeros(len(tnogaps))
        vard = np.zeros(len(tnogaps))
        # NOTE(review): prange presumably parallelizes this loop — confirm
        # which prange implementation is imported in this module.
        for i in prange(len(tnogaps)):
            trn = transit_model(tnogaps, tnogaps[i])
            trn = np.delete(trn, gaps)
            trn *= med
            # Linear least-squares depth and its variance under the GP model.
            vard[i] = 1. / np.dot(trn, cho_solve(CDK, trn))
            if not np.isfinite(vard[i]):
                # Degenerate fit (e.g. zero model); flag and skip.
                vard[i] = np.nan
                d[i] = np.nan
                dchisq[i] = np.nan
                continue
            d[i] = vard[i] * np.dot(trn, cho_solve(CDK, star.fraw[m]))
            r = star.fraw[m] - trn * d[i]
            lnL = -0.5 * np.dot(r, cho_solve(CDK, r))
            # Improvement of the transit model over the baseline.
            dchisq[i] = -2 * (lnL0 - lnL)
        TIME = np.append(TIME, tnogaps)
        DEPTH = np.append(DEPTH, d)
        VARDEPTH = np.append(VARDEPTH, vard)
        DELCHISQ = np.append(DELCHISQ, dchisq)
    return TIME, DEPTH, VARDEPTH, DELCHISQ
[ "def", "Search", "(", "star", ",", "pos_tol", "=", "2.5", ",", "neg_tol", "=", "50.", ",", "*", "*", "ps_kwargs", ")", ":", "# Smooth the light curve", "t", "=", "np", ".", "delete", "(", "star", ".", "time", ",", "np", ".", "concatenate", "(", "[", ...
NOTE: `pos_tol` is the positive (i.e., above the median) outlier tolerance in standard deviations. NOTE: `neg_tol` is the negative (i.e., below the median) outlier tolerance in standard deviations.
[ "NOTE", ":", "pos_tol", "is", "the", "positive", "(", "i", ".", "e", ".", "above", "the", "median", ")", "outlier", "tolerance", "in", "standard", "deviations", ".", "NOTE", ":", "neg_tol", "is", "the", "negative", "(", "i", ".", "e", ".", "below", "...
python
train
globocom/GloboNetworkAPI-client-python
networkapiclient/ApiNetworkIPv4.py
https://github.com/globocom/GloboNetworkAPI-client-python/blob/cf34f913da48d9abbf750114f5d2ac4b2dde137d/networkapiclient/ApiNetworkIPv4.py#L73-L79
def check_vip_ip(self, ip, environment_vip):
    """
    Check available ip in environment vip
    """
    # Delegate the GET request to the base REST client.
    endpoint = 'api/ipv4/ip/%s/environment-vip/%s/' % (ip, environment_vip)
    return super(ApiNetworkIPv4, self).get(endpoint)
[ "def", "check_vip_ip", "(", "self", ",", "ip", ",", "environment_vip", ")", ":", "uri", "=", "'api/ipv4/ip/%s/environment-vip/%s/'", "%", "(", "ip", ",", "environment_vip", ")", "return", "super", "(", "ApiNetworkIPv4", ",", "self", ")", ".", "get", "(", "ur...
Check available ip in environment vip
[ "Check", "available", "ip", "in", "environment", "vip" ]
python
train
romanz/trezor-agent
libagent/device/ui.py
https://github.com/romanz/trezor-agent/blob/513b1259c4d7aca5f88cd958edc11828d0712f1b/libagent/device/ui.py#L64-L80
def create_default_options_getter():
    """Return current TTY and DISPLAY settings for GnuPG pinentry."""
    opts = []
    try:
        tty_name = subprocess.check_output(args=['tty']).strip()
    except subprocess.CalledProcessError as e:
        # No controlling terminal (e.g. running under a service).
        log.warning('no TTY found: %s', e)
    else:
        opts.append(b'ttyname=' + tty_name)

    display = os.environ.get('DISPLAY')
    if display is None:
        log.warning('DISPLAY not defined')
    else:
        opts.append('display={}'.format(display).encode('ascii'))

    log.info('using %s for pinentry options', opts)
    # The options list is captured once; the getter always returns it.
    return lambda: opts
[ "def", "create_default_options_getter", "(", ")", ":", "options", "=", "[", "]", "try", ":", "ttyname", "=", "subprocess", ".", "check_output", "(", "args", "=", "[", "'tty'", "]", ")", ".", "strip", "(", ")", "options", ".", "append", "(", "b'ttyname='"...
Return current TTY and DISPLAY settings for GnuPG pinentry.
[ "Return", "current", "TTY", "and", "DISPLAY", "settings", "for", "GnuPG", "pinentry", "." ]
python
train
pypa/pipenv
pipenv/vendor/pathlib2/__init__.py
https://github.com/pypa/pipenv/blob/cae8d76c210b9777e90aab76e9c4b0e53bb19cde/pipenv/vendor/pathlib2/__init__.py#L1469-L1475
def chmod(self, mode):
    """
    Change the permissions of the path, like os.chmod().
    """
    # Guard against use after the path's context has been closed.
    # NOTE(review): _raise_closed() presumably raises; if it ever returned
    # normally, execution would fall through to the chmod call — confirm.
    if self._closed:
        self._raise_closed()
    # Delegate the actual permission change to the platform accessor.
    self._accessor.chmod(self, mode)
[ "def", "chmod", "(", "self", ",", "mode", ")", ":", "if", "self", ".", "_closed", ":", "self", ".", "_raise_closed", "(", ")", "self", ".", "_accessor", ".", "chmod", "(", "self", ",", "mode", ")" ]
Change the permissions of the path, like os.chmod().
[ "Change", "the", "permissions", "of", "the", "path", "like", "os", ".", "chmod", "()", "." ]
python
train
cltl/KafNafParserPy
KafNafParserPy/KafNafParserMod.py
https://github.com/cltl/KafNafParserPy/blob/9bc32e803c176404b255ba317479b8780ed5f569/KafNafParserPy/KafNafParserMod.py#L707-L721
def dump(self, filename=None):
    """
    Dumps the object to an output filename (or open file descriptor). The
    filename parameter is optional, and if it is not provided, the standard
    output will be used

    @type filename: string or file descriptor
    @param filename: file where to dump the object (default standard output)
    """
    if filename is not None:
        # Direct serialization to the given file / descriptor.
        self.tree.write(filename, encoding='UTF-8',
                        pretty_print=True, xml_declaration=True)
        return
    # No target given: serialize into an in-memory buffer first, then copy
    # the raw bytes to standard output (its binary buffer when available).
    with io.BytesIO() as buf:
        self.dump(filename=buf)
        data = buf.getvalue()
    getattr(sys.stdout, 'buffer', sys.stdout).write(data)
[ "def", "dump", "(", "self", ",", "filename", "=", "None", ")", ":", "if", "filename", "is", "None", ":", "with", "io", ".", "BytesIO", "(", ")", "as", "buffer", ":", "self", ".", "dump", "(", "filename", "=", "buffer", ")", "bytes", "=", "buffer", ...
Dumps the object to an output filename (or open file descriptor). The filename parameter is optional, and if it is not provided, the standard output will be used @type filename: string or file descriptor @param filename: file where to dump the object (default standard output)
[ "Dumps", "the", "object", "to", "an", "output", "filename", "(", "or", "open", "file", "descriptor", ")", ".", "The", "filename", "parameter", "is", "optional", "and", "if", "it", "is", "not", "provided", "the", "standard", "output", "will", "be", "used" ]
python
train
lmjohns3/theanets
theanets/graph.py
https://github.com/lmjohns3/theanets/blob/79db9f878ef2071f2f576a1cf5d43a752a55894a/theanets/graph.py#L636-L654
def loss(self, **kwargs):
    '''Return a variable representing the regularized loss for this network.

    The regularized loss includes both the :ref:`loss computation <losses>`
    for the network as well as any :ref:`regularizers <regularizers>` that
    are in place.

    Keyword arguments are passed directly to
    :func:`theanets.regularizers.from_kwargs`.

    Returns
    -------
    loss : Theano expression
        A Theano expression representing the loss of this network.
    '''
    regs = regularizers.from_kwargs(self, **kwargs)
    outputs, _ = self.build_graph(regs)
    # Weighted data losses plus weighted regularization penalties.
    data_term = sum(l.weight * l(outputs) for l in self.losses)
    reg_term = sum(r.weight * r.loss(self.layers, outputs) for r in regs)
    return data_term + reg_term
[ "def", "loss", "(", "self", ",", "*", "*", "kwargs", ")", ":", "regs", "=", "regularizers", ".", "from_kwargs", "(", "self", ",", "*", "*", "kwargs", ")", "outputs", ",", "_", "=", "self", ".", "build_graph", "(", "regs", ")", "return", "sum", "(",...
Return a variable representing the regularized loss for this network. The regularized loss includes both the :ref:`loss computation <losses>` for the network as well as any :ref:`regularizers <regularizers>` that are in place. Keyword arguments are passed directly to :func:`theanets.regularizers.from_kwargs`. Returns ------- loss : Theano expression A Theano expression representing the loss of this network.
[ "Return", "a", "variable", "representing", "the", "regularized", "loss", "for", "this", "network", "." ]
python
test
log2timeline/plaso
plaso/output/timesketch_out.py
https://github.com/log2timeline/plaso/blob/9c564698d2da3ffbe23607a3c54c0582ea18a6cc/plaso/output/timesketch_out.py#L68-L75
def SetTimelineName(self, timeline_name):
    """Sets the timeline name.

    Args:
      timeline_name (str): timeline name.
    """
    # Record the name and log it for traceability.
    name = timeline_name
    self._timeline_name = name
    logger.info('Timeline name: {0:s}'.format(name))
[ "def", "SetTimelineName", "(", "self", ",", "timeline_name", ")", ":", "self", ".", "_timeline_name", "=", "timeline_name", "logger", ".", "info", "(", "'Timeline name: {0:s}'", ".", "format", "(", "self", ".", "_timeline_name", ")", ")" ]
Sets the timeline name. Args: timeline_name (str): timeline name.
[ "Sets", "the", "timeline", "name", "." ]
python
train
huntrar/scrape
scrape/utils.py
https://github.com/huntrar/scrape/blob/bf877f6da5df3ed0f2bea60a95acf7df63c88002/scrape/utils.py#L144-L160
def re_filter(text, regexps):
    """Filter text using regular expressions.

    :param text: iterable of lines to filter
    :param regexps: iterable of regular-expression strings; a line is kept
        if any pattern produces a non-empty match anywhere in it
    :return: the matching lines, or the original text when no regexps were
        given or nothing matched
    """
    if not regexps:
        return text

    matched_text = []
    compiled_regexps = [re.compile(x) for x in regexps]
    for line in text:
        # A line already collected earlier is not re-examined.
        if line in matched_text:
            continue
        for regexp in compiled_regexps:
            found = regexp.search(line)
            if found and found.group():
                matched_text.append(line)
                # Bug fix: stop at the first matching pattern. Previously a
                # line was appended once per regexp that matched it, producing
                # duplicates in the result.
                break

    return matched_text or text
[ "def", "re_filter", "(", "text", ",", "regexps", ")", ":", "if", "not", "regexps", ":", "return", "text", "matched_text", "=", "[", "]", "compiled_regexps", "=", "[", "re", ".", "compile", "(", "x", ")", "for", "x", "in", "regexps", "]", "for", "line...
Filter text using regular expressions.
[ "Filter", "text", "using", "regular", "expressions", "." ]
python
train
eaton-lab/toytree
toytree/etemini.py
https://github.com/eaton-lab/toytree/blob/0347ed2098acc5f707fadf52a0ecd411a6d1859c/toytree/etemini.py#L574-L578
def iter_descendants(self, strategy="levelorder", is_leaf_fn=None):
    """ Returns an iterator over all descendant nodes."""
    # Walk the whole subtree and skip the root (self) itself.
    for node in self.traverse(strategy=strategy, is_leaf_fn=is_leaf_fn):
        if node is self:
            continue
        yield node
[ "def", "iter_descendants", "(", "self", ",", "strategy", "=", "\"levelorder\"", ",", "is_leaf_fn", "=", "None", ")", ":", "for", "n", "in", "self", ".", "traverse", "(", "strategy", "=", "strategy", ",", "is_leaf_fn", "=", "is_leaf_fn", ")", ":", "if", "...
Returns an iterator over all descendant nodes.
[ "Returns", "an", "iterator", "over", "all", "descendant", "nodes", "." ]
python
train
angr/angr
angr/calling_conventions.py
https://github.com/angr/angr/blob/4e2f97d56af5419ee73bdb30482c8dd8ff5f3e40/angr/calling_conventions.py#L522-L629
def setup_callsite(self, state, ret_addr, args, stack_base=None, alloc_base=None, grow_like_stack=True):
    """
    This function performs the actions of the caller getting ready to jump
    into a function.

    :param state:           The SimState to operate on
    :param ret_addr:        The address to return to when the called function finishes
    :param args:            The list of arguments that that the called function will see
    :param stack_base:      An optional pointer to use as the top of the stack, circa the function entry point
    :param alloc_base:      An optional pointer to use as the place to put excess argument data
    :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses

    The idea here is that you can provide almost any kind of python type in
    `args` and it'll be translated to a binary format to be placed into
    simulated memory. Lists (representing arrays) must be entirely elements
    of the same type and size, while tuples (representing structs) can be
    elements of any type and size. If you'd like there to be a pointer to a
    given value, wrap the value in a `PointerWrapper`. Any value that can't
    fit in a register will be automatically put in a PointerWrapper.

    If stack_base is not provided, the current stack pointer will be used,
    and it will be updated. If alloc_base is not provided, the stack base
    will be used and grow_like_stack will implicitly be True.

    grow_like_stack controls the behavior of allocating data at alloc_base.
    When data from args needs to be wrapped in a pointer, the pointer needs
    to point somewhere, so that data is dumped into memory at alloc_base.
    If you set alloc_base to point to somewhere other than the stack, set
    grow_like_stack to False so that sequential allocations happen at
    increasing addresses.
    """
    # STEP 0: clerical work
    # Soot (Java) states have their own callsite setup path.
    if isinstance(self, SimCCSoot):
        SimEngineSoot.setup_callsite(state, args, ret_addr)
        return

    allocator = AllocHelper(self.arch.bits, self.arch.memory_endness == 'Iend_LE')

    #
    # STEP 1: convert all values into serialized form
    # this entails creating the vals list of simple values to store and also populating the allocator's
    # understanding of what aux data needs to be stored
    # This is also where we compute arg locations (arg_locs)
    #

    if self.func_ty is not None:
        vals = [self._standardize_value(arg, ty, state, allocator.dump)
                for arg, ty in zip(args, self.func_ty.args)]
    else:
        vals = [self._standardize_value(arg, None, state, allocator.dump)
                for arg in args]

    arg_session = self.arg_session
    arg_locs = [None]*len(args)
    for i, (arg, val) in enumerate(zip(args, vals)):
        # Floating-point arguments get FP argument slots.
        if self.is_fp_value(arg) or \
                (self.func_ty is not None and isinstance(self.func_ty.args[i], SimTypeFloat)):
            arg_locs[i] = arg_session.next_arg(is_fp=True, size=val.length // state.arch.byte_width)
            continue
        # Oversized values (and untyped aggregates) are spilled to aux
        # storage and passed by pointer.
        if val.length > state.arch.bits or (self.func_ty is None and isinstance(arg, (bytes, str, list, tuple))):
            vals[i] = allocator.dump(val, state)
        elif val.length < state.arch.bits:
            # Zero-extend small values to a full machine word, on the
            # correct side for the architecture's endianness.
            if self.arch.memory_endness == 'Iend_LE':
                vals[i] = val.concat(claripy.BVV(0, state.arch.bits - val.length))
            else:
                vals[i] = claripy.BVV(0, state.arch.bits - val.length).concat(val)
        arg_locs[i] = arg_session.next_arg(is_fp=False, size=vals[i].length // state.arch.byte_width)

    #
    # STEP 2: decide on memory storage locations
    # implement the contract for stack_base/alloc_base/grow_like_stack
    # after this, stack_base should be the final stack pointer, alloc_base should be the final aux storage location,
    # and the stack pointer should be updated
    #

    if stack_base is None:
        if alloc_base is None:
            alloc_size = allocator.size()
            state.regs.sp -= alloc_size
            alloc_base = state.regs.sp
            grow_like_stack = False

        state.regs.sp -= self.stack_space(arg_locs)

        # handle alignment
        alignment = (state.regs.sp + self.STACKARG_SP_DIFF) % self.STACK_ALIGNMENT
        state.regs.sp -= alignment
    else:
        state.regs.sp = stack_base

        if alloc_base is None:
            alloc_base = stack_base + self.stack_space(arg_locs)
            grow_like_stack = False

    if grow_like_stack:
        alloc_base -= allocator.size()
    if type(alloc_base) is int:
        alloc_base = claripy.BVV(alloc_base, state.arch.bits)

    # Rewrite serialized values so embedded pointers refer to alloc_base.
    for i, val in enumerate(vals):
        vals[i] = allocator.translate(val, alloc_base)

    #
    # STEP 3: store everything!
    #

    allocator.apply(state, alloc_base)

    for loc, val in zip(arg_locs, vals):
        if val.length > loc.size * 8:
            raise ValueError("Can't fit value {} into location {}".format(repr(val), repr(loc)))
        loc.set_value(state, val, endness='Iend_BE', stack_base=stack_base)

    self.return_addr.set_value(state, ret_addr, stack_base=stack_base)
[ "def", "setup_callsite", "(", "self", ",", "state", ",", "ret_addr", ",", "args", ",", "stack_base", "=", "None", ",", "alloc_base", "=", "None", ",", "grow_like_stack", "=", "True", ")", ":", "# STEP 0: clerical work", "if", "isinstance", "(", "self", ",", ...
This function performs the actions of the caller getting ready to jump into a function. :param state: The SimState to operate on :param ret_addr: The address to return to when the called function finishes :param args: The list of arguments that that the called function will see :param stack_base: An optional pointer to use as the top of the stack, circa the function entry point :param alloc_base: An optional pointer to use as the place to put excess argument data :param grow_like_stack: When allocating data at alloc_base, whether to allocate at decreasing addresses The idea here is that you can provide almost any kind of python type in `args` and it'll be translated to a binary format to be placed into simulated memory. Lists (representing arrays) must be entirely elements of the same type and size, while tuples (representing structs) can be elements of any type and size. If you'd like there to be a pointer to a given value, wrap the value in a `PointerWrapper`. Any value that can't fit in a register will be automatically put in a PointerWrapper. If stack_base is not provided, the current stack pointer will be used, and it will be updated. If alloc_base is not provided, the stack base will be used and grow_like_stack will implicitly be True. grow_like_stack controls the behavior of allocating data at alloc_base. When data from args needs to be wrapped in a pointer, the pointer needs to point somewhere, so that data is dumped into memory at alloc_base. If you set alloc_base to point to somewhere other than the stack, set grow_like_stack to False so that sequential allocations happen at increasing addresses.
[ "This", "function", "performs", "the", "actions", "of", "the", "caller", "getting", "ready", "to", "jump", "into", "a", "function", "." ]
python
train
auth0/auth0-python
auth0/v3/management/connections.py
https://github.com/auth0/auth0-python/blob/34adad3f342226aaaa6071387fa405ab840e5c02/auth0/v3/management/connections.py#L63-L85
def get(self, id, fields=None, include_fields=True):
    """Retrieve connection by id.

    Args:
        id (str): Id of the connection to get.

        fields (list of str, optional): A list of fields to include or
            exclude from the result (depending on include_fields). Empty to
            retrieve all fields.

        include_fields (bool, optional): True if the fields specified are
            to be included in the result, False otherwise.

    See: https://auth0.com/docs/api/management/v2#!/Connections/get_connections_by_id

    Returns:
        A connection object.
    """
    # Comma-join the requested fields; an empty/absent list sends no filter.
    joined_fields = fields and ','.join(fields) or None
    params = {
        'fields': joined_fields,
        'include_fields': str(include_fields).lower(),
    }
    return self.client.get(self._url(id), params=params)
[ "def", "get", "(", "self", ",", "id", ",", "fields", "=", "None", ",", "include_fields", "=", "True", ")", ":", "params", "=", "{", "'fields'", ":", "fields", "and", "','", ".", "join", "(", "fields", ")", "or", "None", ",", "'include_fields'", ":", ...
Retrieve connection by id. Args: id (str): Id of the connection to get. fields (list of str, optional): A list of fields to include or exclude from the result (depending on include_fields). Empty to retrieve all fields. include_fields (bool, optional): True if the fields specified are to be included in the result, False otherwise. See: https://auth0.com/docs/api/management/v2#!/Connections/get_connections_by_id Returns: A connection object.
[ "Retrieve", "connection", "by", "id", "." ]
python
train
oceanprotocol/squid-py
squid_py/agreements/storage.py
https://github.com/oceanprotocol/squid-py/blob/43a5b7431627e4c9ab7382ed9eb8153e96ed4483/squid_py/agreements/storage.py#L42-L60
def update_service_agreement_status(storage_path, service_agreement_id, status='pending'):
    """
    Update the service agreement status.

    :param storage_path: storage path for the internal db, str
    :param service_agreement_id:
    :param status:
    :return:
    """
    # Open, update, commit, and always close the connection.
    conn = sqlite3.connect(storage_path)
    try:
        conn.cursor().execute(
            'UPDATE service_agreements SET status=? WHERE id=?',
            (status, service_agreement_id),
        )
        conn.commit()
    finally:
        conn.close()
[ "def", "update_service_agreement_status", "(", "storage_path", ",", "service_agreement_id", ",", "status", "=", "'pending'", ")", ":", "conn", "=", "sqlite3", ".", "connect", "(", "storage_path", ")", "try", ":", "cursor", "=", "conn", ".", "cursor", "(", ")",...
Update the service agreement status. :param storage_path: storage path for the internal db, str :param service_agreement_id: :param status: :return:
[ "Update", "the", "service", "agreement", "status", "." ]
python
train
gem/oq-engine
openquake/calculators/base.py
https://github.com/gem/oq-engine/blob/8294553a0b8aba33fd96437a35065d03547d0040/openquake/calculators/base.py#L766-L800
def read_shakemap(self, haz_sitecol, assetcol):
    """
    Enabled only if there is a shakemap_id parameter in the job.ini.
    Download, unzip, parse USGS shakemap files and build a corresponding
    set of GMFs which are then filtered with the hazard site collection
    and stored in the datastore.
    """
    oq = self.oqparam
    E = oq.number_of_ground_motion_fields
    # Fall back to the parent calculation's IMTs when none are given here.
    oq.risk_imtls = oq.imtls or self.datastore.parent['oqparam'].imtls
    extra = self.riskmodel.get_extra_imts(oq.risk_imtls)
    if extra:
        logging.warning('There are risk functions for not available IMTs '
                        'which will be ignored: %s' % extra)

    logging.info('Getting/reducing shakemap')
    with self.monitor('getting/reducing shakemap'):
        # smap is either a USGS shakemap id (string) or an array loaded
        # from a local .npy file — TODO confirm the .npy layout.
        smap = oq.shakemap_id if oq.shakemap_id else numpy.load(
            oq.inputs['shakemap'])
        sitecol, shakemap, discarded = get_sitecol_shakemap(
            smap, oq.imtls, haz_sitecol,
            oq.asset_hazard_distance['default'],
            oq.discard_assets)
        if len(discarded):
            self.datastore['discarded'] = discarded
        # Keep only the assets on sites covered by the shakemap.
        assetcol = assetcol.reduce_also(sitecol)

    logging.info('Building GMFs')
    with self.monitor('building/saving GMFs'):
        imts, gmfs = to_gmfs(
            shakemap, oq.spatial_correlation, oq.cross_correlation,
            oq.site_effects, oq.truncation_level, E, oq.random_seed,
            oq.imtls)
        save_gmf_data(self.datastore, sitecol, gmfs, imts)
    return sitecol, assetcol
[ "def", "read_shakemap", "(", "self", ",", "haz_sitecol", ",", "assetcol", ")", ":", "oq", "=", "self", ".", "oqparam", "E", "=", "oq", ".", "number_of_ground_motion_fields", "oq", ".", "risk_imtls", "=", "oq", ".", "imtls", "or", "self", ".", "datastore", ...
Enabled only if there is a shakemap_id parameter in the job.ini. Download, unzip, parse USGS shakemap files and build a corresponding set of GMFs which are then filtered with the hazard site collection and stored in the datastore.
[ "Enabled", "only", "if", "there", "is", "a", "shakemap_id", "parameter", "in", "the", "job", ".", "ini", ".", "Download", "unzip", "parse", "USGS", "shakemap", "files", "and", "build", "a", "corresponding", "set", "of", "GMFs", "which", "are", "then", "fil...
python
train
quizl/quizler
quizler/models.py
https://github.com/quizl/quizler/blob/44b3fd91f7074e7013ffde8147455f45ebdccc46/quizler/models.py#L127-L133
def to_dict(self):
    """Convert WordSet into raw dictionary data."""
    # Serialize each contained term first, then assemble the payload.
    serialized_terms = [term.to_dict() for term in self.terms]
    return {
        'id': self.set_id,
        'title': self.title,
        'terms': serialized_terms,
    }
[ "def", "to_dict", "(", "self", ")", ":", "return", "{", "'id'", ":", "self", ".", "set_id", ",", "'title'", ":", "self", ".", "title", ",", "'terms'", ":", "[", "term", ".", "to_dict", "(", ")", "for", "term", "in", "self", ".", "terms", "]", "}"...
Convert WordSet into raw dictionary data.
[ "Convert", "WordSet", "into", "raw", "dictionary", "data", "." ]
python
train
has2k1/plotnine
plotnine/doctools.py
https://github.com/has2k1/plotnine/blob/566e579af705367e584fb27a74e6c5199624ca89/plotnine/doctools.py#L207-L243
def docstring_section_lines(docstring, section_name):
    """
    Return a section of a numpydoc string

    Parameters
    ----------
    docstring : str
        Docstring
    section_name : str
        Name of section to return (lowercase)

    Returns
    -------
    section : str
        Section minus the header

    Notes
    -----
    Fixed the docstring header typo ("Paramters" with a 9-dash underline).
    """
    lines = []
    inside_section = False
    # The header is recognized only when followed by a matching dashed line.
    underline = '-' * len(section_name)
    expect_underline = False
    for line in docstring.splitlines():
        _line = line.strip().lower()

        if expect_underline:
            expect_underline = False
            if _line == underline:
                inside_section = True
                continue

        if _line == section_name:
            expect_underline = True
        elif _line in DOCSTRING_SECTIONS:
            # next section reached; stop collecting
            break
        elif inside_section:
            lines.append(line)

    return '\n'.join(lines)
[ "def", "docstring_section_lines", "(", "docstring", ",", "section_name", ")", ":", "lines", "=", "[", "]", "inside_section", "=", "False", "underline", "=", "'-'", "*", "len", "(", "section_name", ")", "expect_underline", "=", "False", "for", "line", "in", "...
Return a section of a numpydoc string Parameters ---------- docstring : str Docstring section_name : str Name of section to return Returns ------- section : str Section minus the header
[ "Return", "a", "section", "of", "a", "numpydoc", "string" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/IPython/frontend/terminal/ipapp.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/IPython/frontend/terminal/ipapp.py#L368-L382
def load_default_config(ipython_dir=None):
    """Load the default config file from the default ipython_dir.

    This is useful for embedded shells.
    """
    if ipython_dir is None:
        ipython_dir = get_ipython_dir()
    profile_dir = os.path.join(ipython_dir, 'profile_default')
    loader = PyFileConfigLoader(default_config_file_name, profile_dir)
    try:
        return loader.load_config()
    except ConfigFileNotFound:
        # No config file in the profile; fall back to an empty config.
        return Config()
[ "def", "load_default_config", "(", "ipython_dir", "=", "None", ")", ":", "if", "ipython_dir", "is", "None", ":", "ipython_dir", "=", "get_ipython_dir", "(", ")", "profile_dir", "=", "os", ".", "path", ".", "join", "(", "ipython_dir", ",", "'profile_default'", ...
Load the default config file from the default ipython_dir. This is useful for embedded shells.
[ "Load", "the", "default", "config", "file", "from", "the", "default", "ipython_dir", "." ]
python
test
ml4ai/delphi
delphi/translators/for2py/arrays.py
https://github.com/ml4ai/delphi/blob/6d03d8aafeab99610387c51b89c99738ff2abbe3/delphi/translators/for2py/arrays.py#L179-L188
def array_values(expr): """Given an expression expr denoting a list of values, array_values(expr) returns a list of values for that expression.""" if isinstance(expr, Array): return expr.get_elems(all_subs(expr._bounds)) elif isinstance(expr, list): vals = [array_values(x) for x in expr] return flatten(vals) else: return [expr]
[ "def", "array_values", "(", "expr", ")", ":", "if", "isinstance", "(", "expr", ",", "Array", ")", ":", "return", "expr", ".", "get_elems", "(", "all_subs", "(", "expr", ".", "_bounds", ")", ")", "elif", "isinstance", "(", "expr", ",", "list", ")", ":...
Given an expression expr denoting a list of values, array_values(expr) returns a list of values for that expression.
[ "Given", "an", "expression", "expr", "denoting", "a", "list", "of", "values", "array_values", "(", "expr", ")", "returns", "a", "list", "of", "values", "for", "that", "expression", "." ]
python
train
saltstack/salt
salt/modules/syslog_ng.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/modules/syslog_ng.py#L1146-L1164
def _write_config(config, newlines=2): ''' Writes the given parameter config into the config file. ''' text = config if isinstance(config, dict) and len(list(list(config.keys()))) == 1: key = next(six.iterkeys(config)) text = config[key] try: with salt.utils.files.fopen(__SYSLOG_NG_CONFIG_FILE, 'a') as fha: fha.write(salt.utils.stringutils.to_str(text)) for _ in range(0, newlines): fha.write(salt.utils.stringutils.to_str(os.linesep)) return True except Exception as err: log.error(six.text_type(err)) return False
[ "def", "_write_config", "(", "config", ",", "newlines", "=", "2", ")", ":", "text", "=", "config", "if", "isinstance", "(", "config", ",", "dict", ")", "and", "len", "(", "list", "(", "list", "(", "config", ".", "keys", "(", ")", ")", ")", ")", "...
Writes the given parameter config into the config file.
[ "Writes", "the", "given", "parameter", "config", "into", "the", "config", "file", "." ]
python
train
enkore/i3pystatus
i3pystatus/core/command.py
https://github.com/enkore/i3pystatus/blob/14cfde967cecf79b40e223e35a04600f4c875af7/i3pystatus/core/command.py#L9-L50
def run_through_shell(command, enable_shell=False): """ Retrieve output of a command. Returns a named tuple with three elements: * ``rc`` (integer) Return code of command. * ``out`` (string) Everything that was printed to stdout. * ``err`` (string) Everything that was printed to stderr. Don't use this function with programs that outputs lots of data since the output is saved in one variable. :param command: A string or a list of strings containing the name and arguments of the program. :param enable_shell: If set ot `True` users default shell will be invoked and given ``command`` to execute. The ``command`` should obviously be a string since shell does all the parsing. """ if not enable_shell and isinstance(command, str): command = shlex.split(command) returncode = None stderr = None try: proc = subprocess.Popen(command, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=enable_shell) out, stderr = proc.communicate() out = out.decode("UTF-8") stderr = stderr.decode("UTF-8") returncode = proc.returncode except OSError as e: out = e.strerror stderr = e.strerror logging.getLogger("i3pystatus.core.command").exception("") except subprocess.CalledProcessError as e: out = e.output logging.getLogger("i3pystatus.core.command").exception("") return CommandResult(returncode, out, stderr)
[ "def", "run_through_shell", "(", "command", ",", "enable_shell", "=", "False", ")", ":", "if", "not", "enable_shell", "and", "isinstance", "(", "command", ",", "str", ")", ":", "command", "=", "shlex", ".", "split", "(", "command", ")", "returncode", "=", ...
Retrieve output of a command. Returns a named tuple with three elements: * ``rc`` (integer) Return code of command. * ``out`` (string) Everything that was printed to stdout. * ``err`` (string) Everything that was printed to stderr. Don't use this function with programs that outputs lots of data since the output is saved in one variable. :param command: A string or a list of strings containing the name and arguments of the program. :param enable_shell: If set ot `True` users default shell will be invoked and given ``command`` to execute. The ``command`` should obviously be a string since shell does all the parsing.
[ "Retrieve", "output", "of", "a", "command", ".", "Returns", "a", "named", "tuple", "with", "three", "elements", ":" ]
python
train
Becksteinlab/GromacsWrapper
gromacs/cbook.py
https://github.com/Becksteinlab/GromacsWrapper/blob/d4f9a8cb6f48292732cf7c7e4ef4a6d2ccbc51b9/gromacs/cbook.py#L2070-L2105
def strip_fit(self, **kwargs): """Strip water and fit to the remaining system. First runs :meth:`strip_water` and then :meth:`fit`; see there for arguments. - *strip_input* is used for :meth:`strip_water` (but is only useful in special cases, e.g. when there is no Protein group defined. Then set *strip_input* = ``['Other']``. - *input* is passed on to :meth:`fit` and can contain the ``[center_group, fit_group, output_group]`` - *fitgroup* is only passed to :meth:`fit` and just contains the group to fit to ("backbone" by default) .. warning:: *fitgroup* can only be a Gromacs default group and not a custom group (because the indices change after stripping) - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`, together with the *xy* = ``False`` keyword) .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one. """ kwargs.setdefault('fit', 'rot+trans') kw_fit = {} for k in ('xy', 'fit', 'fitgroup', 'input'): if k in kwargs: kw_fit[k] = kwargs.pop(k) kwargs['input'] = kwargs.pop('strip_input', ['Protein']) kwargs['force'] = kw_fit['force'] = kwargs.pop('force', self.force) paths = self.strip_water(**kwargs) # updates self.nowater transformer_nowater = self.nowater[paths['xtc']] # make sure to get the one we just produced return transformer_nowater.fit(**kw_fit)
[ "def", "strip_fit", "(", "self", ",", "*", "*", "kwargs", ")", ":", "kwargs", ".", "setdefault", "(", "'fit'", ",", "'rot+trans'", ")", "kw_fit", "=", "{", "}", "for", "k", "in", "(", "'xy'", ",", "'fit'", ",", "'fitgroup'", ",", "'input'", ")", ":...
Strip water and fit to the remaining system. First runs :meth:`strip_water` and then :meth:`fit`; see there for arguments. - *strip_input* is used for :meth:`strip_water` (but is only useful in special cases, e.g. when there is no Protein group defined. Then set *strip_input* = ``['Other']``. - *input* is passed on to :meth:`fit` and can contain the ``[center_group, fit_group, output_group]`` - *fitgroup* is only passed to :meth:`fit` and just contains the group to fit to ("backbone" by default) .. warning:: *fitgroup* can only be a Gromacs default group and not a custom group (because the indices change after stripping) - By default *fit* = "rot+trans" (and *fit* is passed to :meth:`fit`, together with the *xy* = ``False`` keyword) .. Note:: The call signature of :meth:`strip_water` is somewhat different from this one.
[ "Strip", "water", "and", "fit", "to", "the", "remaining", "system", "." ]
python
valid
mixmastamyk/console
console/detection.py
https://github.com/mixmastamyk/console/blob/afe6c95d5a7b83d85376f450454e3769e4a5c3d0/console/detection.py#L347-L375
def load_x11_color_map(paths=X11_RGB_PATHS): ''' Load and parse X11's rgb.txt. Loads: x11_color_map: { name_lower: ('R', 'G', 'B') } ''' if type(paths) is str: paths = (paths,) x11_color_map = color_tables.x11_color_map for path in paths: try: with open(path) as infile: for line in infile: if line.startswith('!') or line.isspace(): continue tokens = line.rstrip().split(maxsplit=3) key = tokens[3] if ' ' in key: # skip names with spaces to match webcolors continue x11_color_map[key.lower()] = tuple(tokens[:3]) log.debug('X11 palette found at %r.', path) break except FileNotFoundError as err: log.debug('X11 palette file not found: %r', path) except IOError as err: log.debug('X11 palette file not read: %s', err)
[ "def", "load_x11_color_map", "(", "paths", "=", "X11_RGB_PATHS", ")", ":", "if", "type", "(", "paths", ")", "is", "str", ":", "paths", "=", "(", "paths", ",", ")", "x11_color_map", "=", "color_tables", ".", "x11_color_map", "for", "path", "in", "paths", ...
Load and parse X11's rgb.txt. Loads: x11_color_map: { name_lower: ('R', 'G', 'B') }
[ "Load", "and", "parse", "X11", "s", "rgb", ".", "txt", "." ]
python
train
PyCQA/astroid
astroid/brain/brain_builtin_inference.py
https://github.com/PyCQA/astroid/blob/e0a298df55b15abcb77c2a93253f5ab7be52d0fb/astroid/brain/brain_builtin_inference.py#L404-L429
def infer_getattr(node, context=None): """Understand getattr calls If one of the arguments is an Uninferable object, then the result will be an Uninferable object. Otherwise, the normal attribute lookup will be done. """ obj, attr = _infer_getattr_args(node, context) if ( obj is util.Uninferable or attr is util.Uninferable or not hasattr(obj, "igetattr") ): return util.Uninferable try: return next(obj.igetattr(attr, context=context)) except (StopIteration, InferenceError, AttributeInferenceError): if len(node.args) == 3: # Try to infer the default and return it instead. try: return next(node.args[2].infer(context=context)) except InferenceError: raise UseInferenceDefault raise UseInferenceDefault
[ "def", "infer_getattr", "(", "node", ",", "context", "=", "None", ")", ":", "obj", ",", "attr", "=", "_infer_getattr_args", "(", "node", ",", "context", ")", "if", "(", "obj", "is", "util", ".", "Uninferable", "or", "attr", "is", "util", ".", "Uninfera...
Understand getattr calls If one of the arguments is an Uninferable object, then the result will be an Uninferable object. Otherwise, the normal attribute lookup will be done.
[ "Understand", "getattr", "calls" ]
python
train
barryp/py-amqplib
amqplib/client_0_8/connection.py
https://github.com/barryp/py-amqplib/blob/2b3a47de34b4712c111d0a55d7ff109dffc2a7b2/amqplib/client_0_8/connection.py#L495-L508
def _open_ok(self, args): """ signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr """ self.known_hosts = args.read_shortstr() AMQP_LOGGER.debug('Open OK! known_hosts [%s]' % self.known_hosts) return None
[ "def", "_open_ok", "(", "self", ",", "args", ")", ":", "self", ".", "known_hosts", "=", "args", ".", "read_shortstr", "(", ")", "AMQP_LOGGER", ".", "debug", "(", "'Open OK! known_hosts [%s]'", "%", "self", ".", "known_hosts", ")", "return", "None" ]
signal that the connection is ready This method signals to the client that the connection is ready for use. PARAMETERS: known_hosts: shortstr
[ "signal", "that", "the", "connection", "is", "ready" ]
python
train
PatrikValkovic/grammpy
grammpy/representation/support/_RulesSet.py
https://github.com/PatrikValkovic/grammpy/blob/879ce0ef794ac2823acc19314fcd7a8aba53e50f/grammpy/representation/support/_RulesSet.py#L161-L175
def _get(self, *rules): # type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]] """ Get rules representing parameters. The return rules can be different from parameters, in case parameter define multiple rules in one class. :param rules: For which rules get the representation. :return: List of rules representing parameters. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid. """ for rule in rules: if not inspect.isclass(rule) or not issubclass(rule, Rule): raise NotRuleException(rule) for r in self._split_rules(rule): yield self._find_rule(r)
[ "def", "_get", "(", "self", ",", "*", "rules", ")", ":", "# type: (Iterable[Type[Rule]]) -> Generator[Type[Rule]]", "for", "rule", "in", "rules", ":", "if", "not", "inspect", ".", "isclass", "(", "rule", ")", "or", "not", "issubclass", "(", "rule", ",", "Rul...
Get rules representing parameters. The return rules can be different from parameters, in case parameter define multiple rules in one class. :param rules: For which rules get the representation. :return: List of rules representing parameters. :raise NotRuleException: If the parameter doesn't inherit from Rule. :raise RuleException: If the syntax of the rule is invalid.
[ "Get", "rules", "representing", "parameters", ".", "The", "return", "rules", "can", "be", "different", "from", "parameters", "in", "case", "parameter", "define", "multiple", "rules", "in", "one", "class", ".", ":", "param", "rules", ":", "For", "which", "rul...
python
train
tanghaibao/goatools
goatools/base.py
https://github.com/tanghaibao/goatools/blob/407682e573a108864a79031f8ca19ee3bf377626/goatools/base.py#L127-L138
def download_ncbi_associations(gene2go="gene2go", prt=sys.stdout, loading_bar=True): """Download associations from NCBI, if necessary""" # Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz gzip_file = "{GENE2GO}.gz".format(GENE2GO=gene2go) if not os.path.isfile(gene2go): file_remote = "ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/{GZ}".format( GZ=os.path.basename(gzip_file)) dnld_file(file_remote, gene2go, prt, loading_bar) else: if prt is not None: prt.write(" EXISTS: {FILE}\n".format(FILE=gene2go)) return gene2go
[ "def", "download_ncbi_associations", "(", "gene2go", "=", "\"gene2go\"", ",", "prt", "=", "sys", ".", "stdout", ",", "loading_bar", "=", "True", ")", ":", "# Download: ftp://ftp.ncbi.nlm.nih.gov/gene/DATA/gene2go.gz", "gzip_file", "=", "\"{GENE2GO}.gz\"", ".", "format",...
Download associations from NCBI, if necessary
[ "Download", "associations", "from", "NCBI", "if", "necessary" ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/lstm.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/lstm.py#L423-L429
def lstm_attention_base(): """Base attention params.""" hparams = lstm_seq2seq() hparams.add_hparam("attention_layer_size", hparams.hidden_size) hparams.add_hparam("output_attention", True) hparams.add_hparam("num_heads", 1) return hparams
[ "def", "lstm_attention_base", "(", ")", ":", "hparams", "=", "lstm_seq2seq", "(", ")", "hparams", ".", "add_hparam", "(", "\"attention_layer_size\"", ",", "hparams", ".", "hidden_size", ")", "hparams", ".", "add_hparam", "(", "\"output_attention\"", ",", "True", ...
Base attention params.
[ "Base", "attention", "params", "." ]
python
train
zsimic/runez
src/runez/program.py
https://github.com/zsimic/runez/blob/14363b719a1aae1528859a501a22d075ce0abfcc/src/runez/program.py#L76-L97
def make_executable(path, fatal=True): """ :param str|None path: chmod file with 'path' as executable :param bool|None fatal: Abort execution on failure if True :return int: 1 if effectively done, 0 if no-op, -1 on failure """ if is_executable(path): return 0 if is_dryrun(): LOG.debug("Would make %s executable", short(path)) return 1 if not os.path.exists(path): return abort("%s does not exist, can't make it executable", short(path), fatal=(fatal, -1)) try: os.chmod(path, 0o755) # nosec return 1 except Exception as e: return abort("Can't chmod %s: %s", short(path), e, fatal=(fatal, -1))
[ "def", "make_executable", "(", "path", ",", "fatal", "=", "True", ")", ":", "if", "is_executable", "(", "path", ")", ":", "return", "0", "if", "is_dryrun", "(", ")", ":", "LOG", ".", "debug", "(", "\"Would make %s executable\"", ",", "short", "(", "path"...
:param str|None path: chmod file with 'path' as executable :param bool|None fatal: Abort execution on failure if True :return int: 1 if effectively done, 0 if no-op, -1 on failure
[ ":", "param", "str|None", "path", ":", "chmod", "file", "with", "path", "as", "executable", ":", "param", "bool|None", "fatal", ":", "Abort", "execution", "on", "failure", "if", "True", ":", "return", "int", ":", "1", "if", "effectively", "done", "0", "i...
python
train
inveniosoftware-contrib/record-recommender
record_recommender/cli.py
https://github.com/inveniosoftware-contrib/record-recommender/blob/07f71e783369e6373218b5e6ba0bf15901e9251a/record_recommender/cli.py#L112-L121
def profiles(weeks): """ Number of weeks to build. Starting with the current week. """ profiles = Profiles(store) weeks = get_last_weeks(weeks) if isinstance(weeks, int) else weeks print(weeks) profiles.create(weeks)
[ "def", "profiles", "(", "weeks", ")", ":", "profiles", "=", "Profiles", "(", "store", ")", "weeks", "=", "get_last_weeks", "(", "weeks", ")", "if", "isinstance", "(", "weeks", ",", "int", ")", "else", "weeks", "print", "(", "weeks", ")", "profiles", "....
Number of weeks to build. Starting with the current week.
[ "Number", "of", "weeks", "to", "build", "." ]
python
train
gregoil/ipdbugger
ipdbugger/__init__.py
https://github.com/gregoil/ipdbugger/blob/9575734ec26f6be86ae263496d50eb60bb988b21/ipdbugger/__init__.py#L213-L241
def visit_Call(self, node): """Propagate 'debug' wrapper into inner function calls if needed. Args: node (ast.AST): node statement to surround. """ if self.depth == 0: return node if self.ignore_exceptions is None: ignore_exceptions = ast.Name("None", ast.Load()) else: ignore_exceptions = ast.List(self.ignore_exceptions, ast.Load()) catch_exception_type = self.catch_exception \ if self.catch_exception else "None" catch_exception = ast.Name(catch_exception_type, ast.Load()) depth = ast.Num(self.depth - 1 if self.depth > 0 else -1) debug_node_name = ast.Name("debug", ast.Load()) call_extra_parameters = [] if IS_PYTHON_3 else [None, None] node.func = ast.Call(debug_node_name, [node.func, ignore_exceptions, catch_exception, depth], [], *call_extra_parameters) return node
[ "def", "visit_Call", "(", "self", ",", "node", ")", ":", "if", "self", ".", "depth", "==", "0", ":", "return", "node", "if", "self", ".", "ignore_exceptions", "is", "None", ":", "ignore_exceptions", "=", "ast", ".", "Name", "(", "\"None\"", ",", "ast",...
Propagate 'debug' wrapper into inner function calls if needed. Args: node (ast.AST): node statement to surround.
[ "Propagate", "debug", "wrapper", "into", "inner", "function", "calls", "if", "needed", "." ]
python
train
galactics/beyond
beyond/env/solarsystem.py
https://github.com/galactics/beyond/blob/7a7590ff0fd4c0bac3e8e383ecca03caa98e5742/beyond/env/solarsystem.py#L119-L167
def propagate(cls, date): """Compute the position of the sun at a given date Args: date (~beyond.utils.date.Date) Return: ~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame Example: .. code-block:: python from beyond.utils.date import Date SunPropagator.propagate(Date(2006, 4, 2)) # Orbit = # date = 2006-04-02T00:00:00 UTC # form = Cartesian # frame = MOD # propag = SunPropagator # coord = # x = 146186235644.0 # y = 28789144480.5 # z = 12481136552.3 # vx = 0.0 # vy = 0.0 # vz = 0.0 """ date = date.change_scale('UT1') t_ut1 = date.julian_century lambda_M = 280.460 + 36000.771 * t_ut1 M = np.radians(357.5291092 + 35999.05034 * t_ut1) lambda_el = np.radians(lambda_M + 1.914666471 * np.sin(M) + 0.019994643 * np.sin(2 * M)) r = 1.000140612 - 0.016708617 * np.cos(M) - 0.000139589 * np.cos(2 * M) eps = np.radians(23.439291 - 0.0130042 * t_ut1) pv = r * np.array([ np.cos(lambda_el), np.cos(eps) * np.sin(lambda_el), np.sin(eps) * np.sin(lambda_el), 0, 0, 0 ]) * AU return Orbit(date, pv, 'cartesian', 'MOD', cls())
[ "def", "propagate", "(", "cls", ",", "date", ")", ":", "date", "=", "date", ".", "change_scale", "(", "'UT1'", ")", "t_ut1", "=", "date", ".", "julian_century", "lambda_M", "=", "280.460", "+", "36000.771", "*", "t_ut1", "M", "=", "np", ".", "radians",...
Compute the position of the sun at a given date Args: date (~beyond.utils.date.Date) Return: ~beyond.orbits.orbit.Orbit: Position of the sun in MOD frame Example: .. code-block:: python from beyond.utils.date import Date SunPropagator.propagate(Date(2006, 4, 2)) # Orbit = # date = 2006-04-02T00:00:00 UTC # form = Cartesian # frame = MOD # propag = SunPropagator # coord = # x = 146186235644.0 # y = 28789144480.5 # z = 12481136552.3 # vx = 0.0 # vy = 0.0 # vz = 0.0
[ "Compute", "the", "position", "of", "the", "sun", "at", "a", "given", "date" ]
python
train
ungarj/mapchete
mapchete/config.py
https://github.com/ungarj/mapchete/blob/d482918d0e66a5b414dff6aa7cc854e01fc60ee4/mapchete/config.py#L266-L298
def output(self): """Output object of driver.""" output_params = dict( self._raw["output"], grid=self.output_pyramid.grid, pixelbuffer=self.output_pyramid.pixelbuffer, metatiling=self.output_pyramid.metatiling ) if "path" in output_params: output_params.update( path=absolute_path(path=output_params["path"], base_dir=self.config_dir) ) if "format" not in output_params: raise MapcheteConfigError("output format not specified") if output_params["format"] not in available_output_formats(): raise MapcheteConfigError( "format %s not available in %s" % ( output_params["format"], str(available_output_formats()) ) ) writer = load_output_writer(output_params) try: writer.is_valid_with_config(output_params) except Exception as e: logger.exception(e) raise MapcheteConfigError( "driver %s not compatible with configuration: %s" % ( writer.METADATA["driver_name"], e ) ) return writer
[ "def", "output", "(", "self", ")", ":", "output_params", "=", "dict", "(", "self", ".", "_raw", "[", "\"output\"", "]", ",", "grid", "=", "self", ".", "output_pyramid", ".", "grid", ",", "pixelbuffer", "=", "self", ".", "output_pyramid", ".", "pixelbuffe...
Output object of driver.
[ "Output", "object", "of", "driver", "." ]
python
valid
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/receivable/stickers.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/stickers.py#L237-L255
def from_array(array): """ Deserialize a new MaskPosition from a given dictionary. :return: new MaskPosition instance. :rtype: MaskPosition """ if array is None or not array: return None # end if assert_type_or_raise(array, dict, parameter_name="array") data = {} data['point'] = u(array.get('point')) data['x_shift'] = float(array.get('x_shift')) data['y_shift'] = float(array.get('y_shift')) data['scale'] = float(array.get('scale')) data['_raw'] = array return MaskPosition(**data)
[ "def", "from_array", "(", "array", ")", ":", "if", "array", "is", "None", "or", "not", "array", ":", "return", "None", "# end if", "assert_type_or_raise", "(", "array", ",", "dict", ",", "parameter_name", "=", "\"array\"", ")", "data", "=", "{", "}", "da...
Deserialize a new MaskPosition from a given dictionary. :return: new MaskPosition instance. :rtype: MaskPosition
[ "Deserialize", "a", "new", "MaskPosition", "from", "a", "given", "dictionary", "." ]
python
train
sibirrer/lenstronomy
lenstronomy/GalKin/galkin_old.py
https://github.com/sibirrer/lenstronomy/blob/4edb100a4f3f4fdc4fac9b0032d2b0283d0aa1d6/lenstronomy/GalKin/galkin_old.py#L48-L69
def _vel_disp_one(self, kwargs_profile, kwargs_aperture, kwargs_light, kwargs_anisotropy): """ computes one realisation of the velocity dispersion realized in the slit :param gamma: :param rho0_r0_gamma: :param r_eff: :param r_ani: :param R_slit: :param dR_slit: :param FWHM: :return: """ while True: r = self.lightProfile.draw_light(kwargs_light) # draw r R, x, y = util.R_r(r) # draw projected R x_, y_ = util.displace_PSF(x, y, self.FWHM) # displace via PSF bool = self.aperture.aperture_select(x_, y_, kwargs_aperture) if bool is True: break sigma_s2 = self.sigma_s2(r, R, kwargs_profile, kwargs_anisotropy, kwargs_light) return sigma_s2
[ "def", "_vel_disp_one", "(", "self", ",", "kwargs_profile", ",", "kwargs_aperture", ",", "kwargs_light", ",", "kwargs_anisotropy", ")", ":", "while", "True", ":", "r", "=", "self", ".", "lightProfile", ".", "draw_light", "(", "kwargs_light", ")", "# draw r", "...
computes one realisation of the velocity dispersion realized in the slit :param gamma: :param rho0_r0_gamma: :param r_eff: :param r_ani: :param R_slit: :param dR_slit: :param FWHM: :return:
[ "computes", "one", "realisation", "of", "the", "velocity", "dispersion", "realized", "in", "the", "slit", ":", "param", "gamma", ":", ":", "param", "rho0_r0_gamma", ":", ":", "param", "r_eff", ":", ":", "param", "r_ani", ":", ":", "param", "R_slit", ":", ...
python
train
shichao-an/115wangpan
u115/api.py
https://github.com/shichao-an/115wangpan/blob/e7cc935313f675e886bceca831fcffcdedf1e880/u115/api.py#L830-L840
def _req_files_edit(self, fid, file_name=None, is_mark=0): """Edit a file or directory""" url = self.web_api_url + '/edit' data = locals() del data['self'] req = Request(method='POST', url=url, data=data) res = self.http.send(req) if res.state: return True else: raise RequestFailure('Failed to access files API.')
[ "def", "_req_files_edit", "(", "self", ",", "fid", ",", "file_name", "=", "None", ",", "is_mark", "=", "0", ")", ":", "url", "=", "self", ".", "web_api_url", "+", "'/edit'", "data", "=", "locals", "(", ")", "del", "data", "[", "'self'", "]", "req", ...
Edit a file or directory
[ "Edit", "a", "file", "or", "directory" ]
python
train
greenbone/ospd
ospd/misc.py
https://github.com/greenbone/ospd/blob/cef773166b15a19c17764721d3fe404fa0e107bf/ospd/misc.py#L293-L300
def get_credentials(self, scan_id, target): """ Get a scan's credential list. It return dictionary with the corresponding credential for a given target. """ if target: for item in self.scans_table[scan_id]['targets']: if target == item[0]: return item[2]
[ "def", "get_credentials", "(", "self", ",", "scan_id", ",", "target", ")", ":", "if", "target", ":", "for", "item", "in", "self", ".", "scans_table", "[", "scan_id", "]", "[", "'targets'", "]", ":", "if", "target", "==", "item", "[", "0", "]", ":", ...
Get a scan's credential list. It return dictionary with the corresponding credential for a given target.
[ "Get", "a", "scan", "s", "credential", "list", ".", "It", "return", "dictionary", "with", "the", "corresponding", "credential", "for", "a", "given", "target", "." ]
python
train
pgjones/quart
quart/blueprints.py
https://github.com/pgjones/quart/blob/7cb2d3bd98e8746025764f2b933abc12041fa175/quart/blueprints.py#L647-L663
def url_defaults(self, func: Callable) -> Callable: """Add a url default preprocessor. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.url_defaults`. This will apply to urls in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.url_defaults def default(endpoint, values): ... """ self.record_once(lambda state: state.app.url_defaults(func, self.name)) return func
[ "def", "url_defaults", "(", "self", ",", "func", ":", "Callable", ")", "->", "Callable", ":", "self", ".", "record_once", "(", "lambda", "state", ":", "state", ".", "app", ".", "url_defaults", "(", "func", ",", "self", ".", "name", ")", ")", "return", ...
Add a url default preprocessor. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.url_defaults`. This will apply to urls in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.url_defaults def default(endpoint, values): ...
[ "Add", "a", "url", "default", "preprocessor", "." ]
python
train
apache/incubator-heron
heron/tools/cli/src/python/main.py
https://github.com/apache/incubator-heron/blob/ad10325a0febe89ad337e561ebcbe37ec5d9a5ac/heron/tools/cli/src/python/main.py#L78-L92
def get_command_handlers(): ''' Create a map of command names and handlers ''' return { 'activate': activate, 'config': hconfig, 'deactivate': deactivate, 'help': cli_help, 'kill': kill, 'restart': restart, 'submit': submit, 'update': update, 'version': version }
[ "def", "get_command_handlers", "(", ")", ":", "return", "{", "'activate'", ":", "activate", ",", "'config'", ":", "hconfig", ",", "'deactivate'", ":", "deactivate", ",", "'help'", ":", "cli_help", ",", "'kill'", ":", "kill", ",", "'restart'", ":", "restart",...
Create a map of command names and handlers
[ "Create", "a", "map", "of", "command", "names", "and", "handlers" ]
python
valid
pydot/pydot-ng
pydot_ng/__init__.py
https://github.com/pydot/pydot-ng/blob/16f39800b6f5dc28d291a4d7763bbac04b9efe72/pydot_ng/__init__.py#L328-L362
def graph_from_adjacency_matrix(matrix, node_prefix='', directed=False): """Creates a basic graph out of an adjacency matrix. The matrix has to be a list of rows of values representing an adjacency matrix. The values can be anything: bool, int, float, as long as they can evaluate to True or False. """ node_orig = 1 if directed: graph = Dot(graph_type='digraph') else: graph = Dot(graph_type='graph') for row in matrix: if not directed: skip = matrix.index(row) r = row[skip:] else: skip = 0 r = row node_dest = skip + 1 for e in r: if e: graph.add_edge( Edge( node_prefix + node_orig, node_prefix + node_dest)) node_dest += 1 node_orig += 1 return graph
[ "def", "graph_from_adjacency_matrix", "(", "matrix", ",", "node_prefix", "=", "''", ",", "directed", "=", "False", ")", ":", "node_orig", "=", "1", "if", "directed", ":", "graph", "=", "Dot", "(", "graph_type", "=", "'digraph'", ")", "else", ":", "graph", ...
Creates a basic graph out of an adjacency matrix. The matrix has to be a list of rows of values representing an adjacency matrix. The values can be anything: bool, int, float, as long as they can evaluate to True or False.
[ "Creates", "a", "basic", "graph", "out", "of", "an", "adjacency", "matrix", "." ]
python
train
Robpol86/libnl
libnl/nl80211/iw_util.py
https://github.com/Robpol86/libnl/blob/274e9fdaa39822d06ef70b799ed4a95937a4d923/libnl/nl80211/iw_util.py#L131-L159
def get_ht_mcs(mcs): """http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591. Positional arguments: mcs -- bytearray. Returns: Dict. """ answers = dict() max_rx_supp_data_rate = (mcs[10] & ((mcs[11] & 0x3) << 8)) tx_mcs_set_defined = not not (mcs[12] & (1 << 0)) tx_mcs_set_equal = not (mcs[12] & (1 << 1)) tx_max_num_spatial_streams = ((mcs[12] >> 2) & 3) + 1 tx_unequal_modulation = not not (mcs[12] & (1 << 4)) if max_rx_supp_data_rate: answers['HT Max RX data rate (Mbps)'] = max_rx_supp_data_rate if tx_mcs_set_defined and tx_mcs_set_equal: answers['HT TX/RX MCS rate indexes supported'] = get_mcs_index(mcs) elif tx_mcs_set_defined: answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs) answers['TX unequal modulation supported'] = bool(tx_unequal_modulation) answers['HT TX Max spatial streams'] = tx_max_num_spatial_streams else: answers['HT RX MCS rate indexes supported'] = get_mcs_index(mcs) return answers
[ "def", "get_ht_mcs", "(", "mcs", ")", ":", "answers", "=", "dict", "(", ")", "max_rx_supp_data_rate", "=", "(", "mcs", "[", "10", "]", "&", "(", "(", "mcs", "[", "11", "]", "&", "0x3", ")", "<<", "8", ")", ")", "tx_mcs_set_defined", "=", "not", "...
http://git.kernel.org/cgit/linux/kernel/git/jberg/iw.git/tree/util.c?id=v3.17#n591. Positional arguments: mcs -- bytearray. Returns: Dict.
[ "http", ":", "//", "git", ".", "kernel", ".", "org", "/", "cgit", "/", "linux", "/", "kernel", "/", "git", "/", "jberg", "/", "iw", ".", "git", "/", "tree", "/", "util", ".", "c?id", "=", "v3", ".", "17#n591", "." ]
python
train
ryanvarley/ExoData
exodata/plots.py
https://github.com/ryanvarley/ExoData/blob/e0d3652117214d2377a707d6778f93b7eb201a41/exodata/plots.py#L577-L604
def setup_keys(self): """ Build the initial data dictionary to store the values """ discovery_methods = {} discovery_years = {} nan_list = [] # Initial Loop to get keys for planet in self.planet_list: if 'Solar System' in planet.params['list'] and self.skip_solar_system_planets: continue try: discovery_methods[planet.discoveryMethod] += 1 except KeyError: discovery_methods[planet.discoveryMethod] = 1 try: discovery_years[planet.discoveryYear] += 1 except KeyError: discovery_years[planet.discoveryYear] = 1 if planet.discoveryMethod is np.nan: nan_list.append(planet) self.nan_list = nan_list return discovery_years
[ "def", "setup_keys", "(", "self", ")", ":", "discovery_methods", "=", "{", "}", "discovery_years", "=", "{", "}", "nan_list", "=", "[", "]", "# Initial Loop to get keys", "for", "planet", "in", "self", ".", "planet_list", ":", "if", "'Solar System'", "in", "...
Build the initial data dictionary to store the values
[ "Build", "the", "initial", "data", "dictionary", "to", "store", "the", "values" ]
python
train
python-beaver/python-beaver
beaver/worker/tail.py
https://github.com/python-beaver/python-beaver/blob/93941e968016c5a962dffed9e7a9f6dc1d23236c/beaver/worker/tail.py#L381-L396
def _sincedb_init(self): """Initializes the sincedb schema in an sqlite db""" if not self._sincedb_path: return if not os.path.exists(self._sincedb_path): self._log_debug('initializing sincedb sqlite schema') conn = sqlite3.connect(self._sincedb_path, isolation_level=None) conn.execute(""" create table sincedb ( fid text primary key, filename text, position integer default 1 ); """) conn.close()
[ "def", "_sincedb_init", "(", "self", ")", ":", "if", "not", "self", ".", "_sincedb_path", ":", "return", "if", "not", "os", ".", "path", ".", "exists", "(", "self", ".", "_sincedb_path", ")", ":", "self", ".", "_log_debug", "(", "'initializing sincedb sqli...
Initializes the sincedb schema in an sqlite db
[ "Initializes", "the", "sincedb", "schema", "in", "an", "sqlite", "db" ]
python
train
Workiva/furious
furious/async.py
https://github.com/Workiva/furious/blob/c29823ec8b98549e7439d7273aa064d1e5830632/furious/async.py#L251-L261
def _update_job(self, target, args, kwargs): """Specify the function this async job is to execute when run.""" target_path, options = get_function_path_and_options(target) assert isinstance(args, (tuple, list)) or args is None assert isinstance(kwargs, dict) or kwargs is None if options: self.update_options(**options) self._options['job'] = (target_path, args, kwargs)
[ "def", "_update_job", "(", "self", ",", "target", ",", "args", ",", "kwargs", ")", ":", "target_path", ",", "options", "=", "get_function_path_and_options", "(", "target", ")", "assert", "isinstance", "(", "args", ",", "(", "tuple", ",", "list", ")", ")", ...
Specify the function this async job is to execute when run.
[ "Specify", "the", "function", "this", "async", "job", "is", "to", "execute", "when", "run", "." ]
python
train
Esri/ArcREST
src/arcrest/manageags/_system.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageags/_system.py#L351-L358
def configurationStore(self): """returns the ConfigurationStore object for this site""" url = self._url + "/configstore" return ConfigurationStore(url=url, securityHandler=self._securityHandler, proxy_url=self._proxy_url, proxy_port=self._proxy_port)
[ "def", "configurationStore", "(", "self", ")", ":", "url", "=", "self", ".", "_url", "+", "\"/configstore\"", "return", "ConfigurationStore", "(", "url", "=", "url", ",", "securityHandler", "=", "self", ".", "_securityHandler", ",", "proxy_url", "=", "self", ...
returns the ConfigurationStore object for this site
[ "returns", "the", "ConfigurationStore", "object", "for", "this", "site" ]
python
train
0101/pipetools
pipetools/decorators.py
https://github.com/0101/pipetools/blob/42f71af0ecaeacee0f3d64c8706ddb1caacf8bc1/pipetools/decorators.py#L42-L56
def auto_string_formatter(func): """ Decorator that handles automatic string formatting. By converting a string argument to a function that does formatting on said string. """ @wraps(func) def auto_string_formatter_wrapper(function, *args, **kwargs): if isinstance(function, string_types): function = StringFormatter(function) return func(function, *args, **kwargs) return auto_string_formatter_wrapper
[ "def", "auto_string_formatter", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "auto_string_formatter_wrapper", "(", "function", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "isinstance", "(", "function", ",", "string_types", "...
Decorator that handles automatic string formatting. By converting a string argument to a function that does formatting on said string.
[ "Decorator", "that", "handles", "automatic", "string", "formatting", "." ]
python
train
tyarkoni/pliers
pliers/graph.py
https://github.com/tyarkoni/pliers/blob/5b3385960ebd8c6ef1e86dd5e1be0080b2cb7f2b/pliers/graph.py#L300-L305
def to_json(self): ''' Returns the JSON representation of this graph. ''' roots = [] for r in self.roots: roots.append(r.to_json()) return {'roots': roots}
[ "def", "to_json", "(", "self", ")", ":", "roots", "=", "[", "]", "for", "r", "in", "self", ".", "roots", ":", "roots", ".", "append", "(", "r", ".", "to_json", "(", ")", ")", "return", "{", "'roots'", ":", "roots", "}" ]
Returns the JSON representation of this graph.
[ "Returns", "the", "JSON", "representation", "of", "this", "graph", "." ]
python
train
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_visualizer.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_visualizer.py#L70-L94
def bpmn_diagram_to_png(bpmn_diagram, file_name): """ Create a png picture for given diagram :param bpmn_diagram: an instance of BPMNDiagramGraph class, :param file_name: name of generated file. """ g = bpmn_diagram.diagram_graph graph = pydotplus.Dot() for node in g.nodes(data=True): if node[1].get(consts.Consts.type) == consts.Consts.task: n = pydotplus.Node(name=node[0], shape="box", style="rounded", label=node[1].get(consts.Consts.node_name)) elif node[1].get(consts.Consts.type) == consts.Consts.exclusive_gateway: n = pydotplus.Node(name=node[0], shape="diamond", label=node[1].get(consts.Consts.node_name)) else: n = pydotplus.Node(name=node[0], label=node[1].get(consts.Consts.node_name)) graph.add_node(n) for edge in g.edges(data=True): e = pydotplus.Edge(src=edge[0], dst=edge[1], label=edge[2].get(consts.Consts.name)) graph.add_edge(e) graph.write(file_name + ".png", format='png')
[ "def", "bpmn_diagram_to_png", "(", "bpmn_diagram", ",", "file_name", ")", ":", "g", "=", "bpmn_diagram", ".", "diagram_graph", "graph", "=", "pydotplus", ".", "Dot", "(", ")", "for", "node", "in", "g", ".", "nodes", "(", "data", "=", "True", ")", ":", ...
Create a png picture for given diagram :param bpmn_diagram: an instance of BPMNDiagramGraph class, :param file_name: name of generated file.
[ "Create", "a", "png", "picture", "for", "given", "diagram" ]
python
train
soimort/you-get
src/you_get/extractors/ucas.py
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ucas.py#L119-L127
def ucas_download_playlist(url, output_dir = '.', merge = False, info_only = False, **kwargs): '''course page''' html = get_content(url) parts = re.findall( r'(getplaytitle.do\?.+)"', html) assert parts, 'No part found!' for part_path in parts: ucas_download('http://v.ucas.ac.cn/course/' + part_path, output_dir=output_dir, merge=merge, info_only=info_only)
[ "def", "ucas_download_playlist", "(", "url", ",", "output_dir", "=", "'.'", ",", "merge", "=", "False", ",", "info_only", "=", "False", ",", "*", "*", "kwargs", ")", ":", "html", "=", "get_content", "(", "url", ")", "parts", "=", "re", ".", "findall", ...
course page
[ "course", "page" ]
python
test
Azure/msrest-for-python
msrest/authentication.py
https://github.com/Azure/msrest-for-python/blob/0732bc90bdb290e5f58c675ffdd7dbfa9acefc93/msrest/authentication.py#L67-L81
def signed_session(self, session=None): # type: (Optional[requests.Session]) -> requests.Session """Create requests session with any required auth headers applied. If a session object is provided, configure it directly. Otherwise, create a new session and return it. :param session: The session to configure for authentication :type session: requests.Session :rtype: requests.Session """ session = super(BasicAuthentication, self).signed_session(session) session.auth = HTTPBasicAuth(self.username, self.password) return session
[ "def", "signed_session", "(", "self", ",", "session", "=", "None", ")", ":", "# type: (Optional[requests.Session]) -> requests.Session", "session", "=", "super", "(", "BasicAuthentication", ",", "self", ")", ".", "signed_session", "(", "session", ")", "session", "."...
Create requests session with any required auth headers applied. If a session object is provided, configure it directly. Otherwise, create a new session and return it. :param session: The session to configure for authentication :type session: requests.Session :rtype: requests.Session
[ "Create", "requests", "session", "with", "any", "required", "auth", "headers", "applied", "." ]
python
train
carljm/django-adminfiles
adminfiles/flickr.py
https://github.com/carljm/django-adminfiles/blob/b01dc7be266305d575c11d5ff9a37ccac04a78c2/adminfiles/flickr.py#L222-L234
def getURL(self, size='Medium', urlType='url'): """Retrieves a url for the photo. (flickr.photos.getSizes) urlType - 'url' or 'source' 'url' - flickr page of photo 'source' - image file """ method = 'flickr.photos.getSizes' data = _doget(method, photo_id=self.id) for psize in data.rsp.sizes.size: if psize.label == size: return getattr(psize, urlType) raise FlickrError, "No URL found"
[ "def", "getURL", "(", "self", ",", "size", "=", "'Medium'", ",", "urlType", "=", "'url'", ")", ":", "method", "=", "'flickr.photos.getSizes'", "data", "=", "_doget", "(", "method", ",", "photo_id", "=", "self", ".", "id", ")", "for", "psize", "in", "da...
Retrieves a url for the photo. (flickr.photos.getSizes) urlType - 'url' or 'source' 'url' - flickr page of photo 'source' - image file
[ "Retrieves", "a", "url", "for", "the", "photo", ".", "(", "flickr", ".", "photos", ".", "getSizes", ")" ]
python
train
onelogin/python3-saml
src/onelogin/saml2/response.py
https://github.com/onelogin/python3-saml/blob/064b7275fba1e5f39a9116ba1cdcc5d01fc34daa/src/onelogin/saml2/response.py#L501-L513
def get_session_not_on_or_after(self): """ Gets the SessionNotOnOrAfter from the AuthnStatement Could be used to set the local session expiration :returns: The SessionNotOnOrAfter value :rtype: time|None """ not_on_or_after = None authn_statement_nodes = self.__query_assertion('/saml:AuthnStatement[@SessionNotOnOrAfter]') if authn_statement_nodes: not_on_or_after = OneLogin_Saml2_Utils.parse_SAML_to_time(authn_statement_nodes[0].get('SessionNotOnOrAfter')) return not_on_or_after
[ "def", "get_session_not_on_or_after", "(", "self", ")", ":", "not_on_or_after", "=", "None", "authn_statement_nodes", "=", "self", ".", "__query_assertion", "(", "'/saml:AuthnStatement[@SessionNotOnOrAfter]'", ")", "if", "authn_statement_nodes", ":", "not_on_or_after", "=",...
Gets the SessionNotOnOrAfter from the AuthnStatement Could be used to set the local session expiration :returns: The SessionNotOnOrAfter value :rtype: time|None
[ "Gets", "the", "SessionNotOnOrAfter", "from", "the", "AuthnStatement", "Could", "be", "used", "to", "set", "the", "local", "session", "expiration" ]
python
train
xtrementl/focus
focus/plugin/modules/im.py
https://github.com/xtrementl/focus/blob/cbbbc0b49a7409f9e0dc899de5b7e057f50838e4/focus/plugin/modules/im.py#L165-L213
def _empathy_status(status, message): """ Updates status and message for Empathy IM application. `status` Status type. `message` Status message. """ ACCT_IFACE = 'org.freedesktop.Telepathy.Account' DBUS_PROP_IFACE = 'org.freedesktop.DBus.Properties' ACCT_MAN_IFACE = 'org.freedesktop.Telepathy.AccountManager' ACCT_MAN_PATH = '/org/freedesktop/Telepathy/AccountManager' SP_IFACE = ('org.freedesktop.Telepathy.' 'Connection.Interface.SimplePresence') # fetch main account manager interface am_iface = _dbus_get_interface(ACCT_MAN_IFACE, ACCT_MAN_PATH, DBUS_PROP_IFACE) if am_iface: account_paths = am_iface.Get(ACCT_MAN_IFACE, 'ValidAccounts') for account_path in account_paths: try: # fetch account interface account = _dbus_get_object(ACCT_MAN_IFACE, account_path) # skip disconnected, disabled, etc. if account.Get(ACCT_IFACE, 'ConnectionStatus') != 0: continue # fetch simple presence interface for account connection conn_path = account.Get(ACCT_IFACE, 'Connection') conn_iface = conn_path.replace("/", ".")[1:] sp_iface = _dbus_get_interface(conn_iface, conn_path, SP_IFACE) except dbus.exceptions.DBusException: continue # set status and message for code in EMPATHY_CODE_MAP[status]: try: sp_iface.SetPresence(code, message) except dbus.exceptions.DBusException: pass else: break
[ "def", "_empathy_status", "(", "status", ",", "message", ")", ":", "ACCT_IFACE", "=", "'org.freedesktop.Telepathy.Account'", "DBUS_PROP_IFACE", "=", "'org.freedesktop.DBus.Properties'", "ACCT_MAN_IFACE", "=", "'org.freedesktop.Telepathy.AccountManager'", "ACCT_MAN_PATH", "=", "...
Updates status and message for Empathy IM application. `status` Status type. `message` Status message.
[ "Updates", "status", "and", "message", "for", "Empathy", "IM", "application", "." ]
python
train
brocade/pynos
pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py
https://github.com/brocade/pynos/blob/bd8a34e98f322de3fc06750827d8bbc3a0c00380/pynos/versions/ver_6/ver_6_0_1/yang/ietf_netconf_monitoring.py#L342-L352
def netconf_state_statistics_netconf_start_time(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") netconf_state = ET.SubElement(config, "netconf-state", xmlns="urn:ietf:params:xml:ns:yang:ietf-netconf-monitoring") statistics = ET.SubElement(netconf_state, "statistics") netconf_start_time = ET.SubElement(statistics, "netconf-start-time") netconf_start_time.text = kwargs.pop('netconf_start_time') callback = kwargs.pop('callback', self._callback) return callback(config)
[ "def", "netconf_state_statistics_netconf_start_time", "(", "self", ",", "*", "*", "kwargs", ")", ":", "config", "=", "ET", ".", "Element", "(", "\"config\"", ")", "netconf_state", "=", "ET", ".", "SubElement", "(", "config", ",", "\"netconf-state\"", ",", "xml...
Auto Generated Code
[ "Auto", "Generated", "Code" ]
python
train
hayd/pep8radius
pep8radius/vcs.py
https://github.com/hayd/pep8radius/blob/0c1d14835d390f7feeb602f35a768e52ce306a0a/pep8radius/vcs.py#L128-L138
def modified_lines_from_diff(self, diff): """Returns the changed lines in a diff. - Potentially this is vc specific (if not using udiff). Note: this returns the line numbers in descending order. """ from pep8radius.diff import modified_lines_from_udiff for start, end in modified_lines_from_udiff(diff): yield start, end
[ "def", "modified_lines_from_diff", "(", "self", ",", "diff", ")", ":", "from", "pep8radius", ".", "diff", "import", "modified_lines_from_udiff", "for", "start", ",", "end", "in", "modified_lines_from_udiff", "(", "diff", ")", ":", "yield", "start", ",", "end" ]
Returns the changed lines in a diff. - Potentially this is vc specific (if not using udiff). Note: this returns the line numbers in descending order.
[ "Returns", "the", "changed", "lines", "in", "a", "diff", "." ]
python
train
ampl/amplpy
amplpy/ampl.py
https://github.com/ampl/amplpy/blob/39df6954049a11a8f666aed26853259b4687099a/amplpy/ampl.py#L250-L282
def eval(self, amplstatements, **kwargs): """ Parses AMPL code and evaluates it as a possibly empty sequence of AMPL declarations and statements. As a side effect, it invalidates all entities (as the passed statements can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access) The output of interpreting the statements is passed to the current OutputHandler (see getOutputHandler and setOutputHandler). By default, errors and warnings are printed on stdout. This behavior can be changed reassigning an ErrorHandler using setErrorHandler. Args: amplstatements: A collection of AMPL statements and declarations to be passed to the interpreter. Raises: RuntimeError: if the input is not a complete AMPL statement (e.g. if it does not end with semicolon) or if the underlying interpreter is not running. """ if self._langext is not None: amplstatements = self._langext.translate(amplstatements, **kwargs) lock_and_call( lambda: self._impl.eval(amplstatements), self._lock ) self._errorhandler_wrapper.check()
[ "def", "eval", "(", "self", ",", "amplstatements", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "_langext", "is", "not", "None", ":", "amplstatements", "=", "self", ".", "_langext", ".", "translate", "(", "amplstatements", ",", "*", "*", "kwar...
Parses AMPL code and evaluates it as a possibly empty sequence of AMPL declarations and statements. As a side effect, it invalidates all entities (as the passed statements can contain any arbitrary command); the lists of entities will be re-populated lazily (at first access) The output of interpreting the statements is passed to the current OutputHandler (see getOutputHandler and setOutputHandler). By default, errors and warnings are printed on stdout. This behavior can be changed reassigning an ErrorHandler using setErrorHandler. Args: amplstatements: A collection of AMPL statements and declarations to be passed to the interpreter. Raises: RuntimeError: if the input is not a complete AMPL statement (e.g. if it does not end with semicolon) or if the underlying interpreter is not running.
[ "Parses", "AMPL", "code", "and", "evaluates", "it", "as", "a", "possibly", "empty", "sequence", "of", "AMPL", "declarations", "and", "statements", "." ]
python
train
peepall/FancyLogger
FancyLogger/__init__.py
https://github.com/peepall/FancyLogger/blob/7f13f1397e76ed768fb6b6358194118831fafc6d/FancyLogger/__init__.py#L354-L363
def debug(self, text): """ Posts a debug message adding a timestamp and logging level to it for both file and console handlers. Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the right time. Logger will redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw immediately (may produce flickering) then call 'flush' method. :param text: The text to log into file and console. """ self.queue.put(dill.dumps(LogMessageCommand(text=text, level=logging.DEBUG)))
[ "def", "debug", "(", "self", ",", "text", ")", ":", "self", ".", "queue", ".", "put", "(", "dill", ".", "dumps", "(", "LogMessageCommand", "(", "text", "=", "text", ",", "level", "=", "logging", ".", "DEBUG", ")", ")", ")" ]
Posts a debug message adding a timestamp and logging level to it for both file and console handlers. Logger uses a redraw rate because of console flickering. That means it will not draw new messages or progress at the very time they are being logged but their timestamp will be captured at the right time. Logger will redraw at a given time period AND when new messages or progress are logged. If you still want to force redraw immediately (may produce flickering) then call 'flush' method. :param text: The text to log into file and console.
[ "Posts", "a", "debug", "message", "adding", "a", "timestamp", "and", "logging", "level", "to", "it", "for", "both", "file", "and", "console", "handlers", ".", "Logger", "uses", "a", "redraw", "rate", "because", "of", "console", "flickering", ".", "That", "...
python
train
jamesturk/jellyfish
jellyfish/porter.py
https://github.com/jamesturk/jellyfish/blob/699727a6d3ba0ba78a19d70745458d592c140203/jellyfish/porter.py#L95-L100
def vowel_in_stem(self): """ True iff 0...j contains vowel """ for i in range(0, self.j+1): if not self.cons(i): return True return False
[ "def", "vowel_in_stem", "(", "self", ")", ":", "for", "i", "in", "range", "(", "0", ",", "self", ".", "j", "+", "1", ")", ":", "if", "not", "self", ".", "cons", "(", "i", ")", ":", "return", "True", "return", "False" ]
True iff 0...j contains vowel
[ "True", "iff", "0", "...", "j", "contains", "vowel" ]
python
train
SoCo/SoCo
dev_tools/analyse_ws.py
https://github.com/SoCo/SoCo/blob/671937e07d7973b78c0cbee153d4f3ad68ec48c6/dev_tools/analyse_ws.py#L492-L507
def getch(): """ Read a single character non-echoed and return it. Recipe from: http://code.activestate.com/recipes/ 134892-getch-like-unbuffered-character-reading-from-stdin/ """ filedescriptor = sys.stdin.fileno() old_settings = termios.tcgetattr(filedescriptor) if PLATFORM == 'win32': character = msvcrt.getch() else: try: tty.setraw(sys.stdin.fileno()) character = sys.stdin.read(1) finally: termios.tcsetattr(filedescriptor, termios.TCSADRAIN, old_settings) return character
[ "def", "getch", "(", ")", ":", "filedescriptor", "=", "sys", ".", "stdin", ".", "fileno", "(", ")", "old_settings", "=", "termios", ".", "tcgetattr", "(", "filedescriptor", ")", "if", "PLATFORM", "==", "'win32'", ":", "character", "=", "msvcrt", ".", "ge...
Read a single character non-echoed and return it. Recipe from: http://code.activestate.com/recipes/ 134892-getch-like-unbuffered-character-reading-from-stdin/
[ "Read", "a", "single", "character", "non", "-", "echoed", "and", "return", "it", ".", "Recipe", "from", ":", "http", ":", "//", "code", ".", "activestate", ".", "com", "/", "recipes", "/", "134892", "-", "getch", "-", "like", "-", "unbuffered", "-", ...
python
train
tensorflow/mesh
mesh_tensorflow/placement_mesh_impl.py
https://github.com/tensorflow/mesh/blob/3921196e5e43302e820da0a87329f25d7e2a3016/mesh_tensorflow/placement_mesh_impl.py#L285-L309
def _collective_with_groups(self, x, mesh_axes, collective): """Grouped collective, (across the given dimensions). Args: x: a LaidOutTensor mesh_axes: a list of integers - the mesh dimensions to be reduced collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor) Returns: a LaidOutTensor """ if not mesh_axes: return x x = x.to_laid_out_tensor() if len(mesh_axes) == self.ndims: return self.LaidOutTensor(collective(x.tensor_list, self._devices)) else: groups = mtf.processor_groups(self.shape, mesh_axes) ret = [None] * self.size for g in groups: inputs = [x.tensor_list[pnum] for pnum in g] devices = [self._devices[pnum] for pnum in g] reduced = collective(inputs, devices) for pnum, y in zip(g, reduced): ret[pnum] = y return self.LaidOutTensor(ret)
[ "def", "_collective_with_groups", "(", "self", ",", "x", ",", "mesh_axes", ",", "collective", ")", ":", "if", "not", "mesh_axes", ":", "return", "x", "x", "=", "x", ".", "to_laid_out_tensor", "(", ")", "if", "len", "(", "mesh_axes", ")", "==", "self", ...
Grouped collective, (across the given dimensions). Args: x: a LaidOutTensor mesh_axes: a list of integers - the mesh dimensions to be reduced collective: fn from list(tf.Tensor), list(device) -> list(tf.Tensor) Returns: a LaidOutTensor
[ "Grouped", "collective", "(", "across", "the", "given", "dimensions", ")", "." ]
python
train
pecan/pecan
pecan/core.py
https://github.com/pecan/pecan/blob/833d0653fa0e6bbfb52545b091c30182105f4a82/pecan/core.py#L554-L622
def invoke_controller(self, controller, args, kwargs, state): ''' The main request handler for Pecan applications. ''' cfg = _cfg(controller) content_types = cfg.get('content_types', {}) req = state.request resp = state.response pecan_state = req.pecan # If a keyword is supplied via HTTP GET or POST arguments, but the # function signature does not allow it, just drop it (rather than # generating a TypeError). argspec = getargspec(controller) keys = kwargs.keys() for key in keys: if key not in argspec.args and not argspec.keywords: kwargs.pop(key) # get the result from the controller result = controller(*args, **kwargs) # a controller can return the response object which means they've taken # care of filling it out if result is response: return elif isinstance(result, WebObResponse): state.response = result return raw_namespace = result # pull the template out based upon content type and handle overrides template = content_types.get(pecan_state['content_type']) # check if for controller override of template template = pecan_state.get('override_template', template) if template is None and cfg['explicit_content_type'] is False: if self.default_renderer == 'json': template = 'json' pecan_state['content_type'] = pecan_state.get( 'override_content_type', pecan_state['content_type'] ) # if there is a template, render it if template: if template == 'json': pecan_state['content_type'] = 'application/json' result = self.render(template, result) # If we are in a test request put the namespace where it can be # accessed directly if req.environ.get('paste.testing'): testing_variables = req.environ['paste.testing_variables'] testing_variables['namespace'] = raw_namespace testing_variables['template_name'] = template testing_variables['controller_output'] = result # set the body content if result and isinstance(result, six.text_type): resp.text = result elif result: resp.body = result if pecan_state['content_type']: # set the content type resp.content_type = 
pecan_state['content_type']
[ "def", "invoke_controller", "(", "self", ",", "controller", ",", "args", ",", "kwargs", ",", "state", ")", ":", "cfg", "=", "_cfg", "(", "controller", ")", "content_types", "=", "cfg", ".", "get", "(", "'content_types'", ",", "{", "}", ")", "req", "=",...
The main request handler for Pecan applications.
[ "The", "main", "request", "handler", "for", "Pecan", "applications", "." ]
python
train
iotile/coretools
iotilebuild/iotile/build/tilebus/descriptor.py
https://github.com/iotile/coretools/blob/2d794f5f1346b841b0dcd16c9d284e9bf2f3c6ec/iotilebuild/iotile/build/tilebus/descriptor.py#L286-L305
def get_block(self, config_only=False): """Create a TileBus Block based on the information in this descriptor""" mib = TBBlock() for cid, config in self.configs.items(): mib.add_config(cid, config) if not config_only: for key, val in self.commands.items(): mib.add_command(key, val) if not self.valid: self._validate_information() mib.set_api_version(*self.variables["APIVersion"]) mib.set_module_version(*self.variables["ModuleVersion"]) mib.set_name(self.variables["ModuleName"]) return mib
[ "def", "get_block", "(", "self", ",", "config_only", "=", "False", ")", ":", "mib", "=", "TBBlock", "(", ")", "for", "cid", ",", "config", "in", "self", ".", "configs", ".", "items", "(", ")", ":", "mib", ".", "add_config", "(", "cid", ",", "config...
Create a TileBus Block based on the information in this descriptor
[ "Create", "a", "TileBus", "Block", "based", "on", "the", "information", "in", "this", "descriptor" ]
python
train
googleapis/google-cloud-python
automl/google/cloud/automl_v1beta1/gapic/prediction_service_client.py
https://github.com/googleapis/google-cloud-python/blob/85e80125a59cb10f8cb105f25ecc099e4b940b50/automl/google/cloud/automl_v1beta1/gapic/prediction_service_client.py#L201-L312
def predict(
    self,
    name,
    payload,
    params=None,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    """Perform an online prediction; the result is returned directly.

    Supported problem types and expected payloads include image
    classification / object detection (JPEG, GIF or PNG ``image_bytes`` up
    to 30MB), text classification / extraction / sentiment (UTF-8
    TextSnippet), translation, and Tables rows (up to 5MB).

    Example:
        >>> from google.cloud import automl_v1beta1
        >>> client = automl_v1beta1.PredictionServiceClient()
        >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]')
        >>> response = client.predict(name, {})

    Args:
        name (str): Name of the model requested to serve the prediction.
        payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]):
            Required. Payload to perform a prediction on; it must match the
            problem type the model was trained to solve.
        params (dict[str -> str]): Additional domain-specific parameters
            (e.g. ``score_threshold``, ``max_bounding_box_count``); any
            string value may be up to 25000 characters long.
        retry (Optional[google.api_core.retry.Retry]): Retry policy for the
            request; ``None`` disables retries.
        timeout (Optional[float]): Per-attempt timeout in seconds.
        metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
            provided to the method.

    Returns:
        A :class:`~google.cloud.automl_v1beta1.types.PredictResponse` instance.

    Raises:
        google.api_core.exceptions.GoogleAPICallError: If the request failed
            for any reason.
        google.api_core.exceptions.RetryError: If the request failed due to
            a retryable error and retry attempts failed.
        ValueError: If the parameters are invalid.
    """
    # Lazily wrap the raw transport method with retry/timeout logic the
    # first time this RPC is used.
    if "predict" not in self._inner_api_calls:
        self._inner_api_calls[
            "predict"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.predict,
            default_retry=self._method_configs["Predict"].retry,
            default_timeout=self._method_configs["Predict"].timeout,
            client_info=self._client_info,
        )

    request = prediction_service_pb2.PredictRequest(
        name=name, payload=payload, params=params
    )

    # Copy caller metadata so the routing header can be appended safely.
    metadata = list(metadata) if metadata is not None else []
    try:
        routing_header = [("name", name)]
    except AttributeError:
        pass
    else:
        metadata.append(
            google.api_core.gapic_v1.routing_header.to_grpc_metadata(
                routing_header
            )
        )

    return self._inner_api_calls["predict"](
        request, retry=retry, timeout=timeout, metadata=metadata
    )
[ "def", "predict", "(", "self", ",", "name", ",", "payload", ",", "params", "=", "None", ",", "retry", "=", "google", ".", "api_core", ".", "gapic_v1", ".", "method", ".", "DEFAULT", ",", "timeout", "=", "google", ".", "api_core", ".", "gapic_v1", ".", ...
Perform an online prediction. The prediction result will be directly returned in the response. Available for following ML problems, and their expected request payloads: - Image Classification - Image in .JPEG, .GIF or .PNG format, image\_bytes up to 30MB. - Image Object Detection - Image in .JPEG, .GIF or .PNG format, image\_bytes up to 30MB. - Text Classification - TextSnippet, content up to 10,000 characters, UTF-8 encoded. - Text Extraction - TextSnippet, content up to 30,000 characters, UTF-8 NFC encoded. \* Translation - TextSnippet, content up to 25,000 characters, UTF-8 encoded. - Tables - Row, with column values matching the columns of the model, up to 5MB. - Text Sentiment - TextSnippet, content up 500 characters, UTF-8 encoded. Example: >>> from google.cloud import automl_v1beta1 >>> >>> client = automl_v1beta1.PredictionServiceClient() >>> >>> name = client.model_path('[PROJECT]', '[LOCATION]', '[MODEL]') >>> >>> # TODO: Initialize `payload`: >>> payload = {} >>> >>> response = client.predict(name, payload) Args: name (str): Name of the model requested to serve the prediction. payload (Union[dict, ~google.cloud.automl_v1beta1.types.ExamplePayload]): Required. Payload to perform a prediction on. The payload must match the problem type that the model was trained to solve. If a dict is provided, it must be of the same form as the protobuf message :class:`~google.cloud.automl_v1beta1.types.ExamplePayload` params (dict[str -> str]): Additional domain-specific parameters, any string must be up to 25000 characters long. - For Image Classification: ``score_threshold`` - (float) A value from 0.0 to 1.0. When the model makes predictions for an image, it will only produce results that have at least this confidence score. The default is 0.5. - For Image Object Detection: ``score_threshold`` - (float) When Model detects objects on the image, it will only produce bounding boxes which have at least this confidence score. Value in 0 to 1 range, default is 0.5. 
``max_bounding_box_count`` - (int64) No more than this number of bounding boxes will be returned in the response. Default is 100, the requested value may be limited by server. retry (Optional[google.api_core.retry.Retry]): A retry object used to retry requests. If ``None`` is specified, requests will not be retried. timeout (Optional[float]): The amount of time, in seconds, to wait for the request to complete. Note that if ``retry`` is specified, the timeout applies to each individual attempt. metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata that is provided to the method. Returns: A :class:`~google.cloud.automl_v1beta1.types.PredictResponse` instance. Raises: google.api_core.exceptions.GoogleAPICallError: If the request failed for any reason. google.api_core.exceptions.RetryError: If the request failed due to a retryable error and retry attempts failed. ValueError: If the parameters are invalid.
[ "Perform", "an", "online", "prediction", ".", "The", "prediction", "result", "will", "be", "directly", "returned", "in", "the", "response", ".", "Available", "for", "following", "ML", "problems", "and", "their", "expected", "request", "payloads", ":" ]
python
train
DreamLab/VmShepherd
src/vmshepherd/http/rpc_api.py
https://github.com/DreamLab/VmShepherd/blob/709a412c372b897d53808039c5c64a8b69c12c8d/src/vmshepherd/http/rpc_api.py#L18-L26
def enabled_checker(func):
    """Access decorator gating an RPC method on the instance configuration.

    The wrapped method runs only if its name appears in
    ``self.allowed_methods`` (when that attribute is a non-empty list);
    otherwise an ``Exception`` is raised.  A falsy or non-list value means
    no restriction at all.
    """
    @wraps(func)
    def guarded(self, *args, **kwargs):
        whitelist = self.allowed_methods
        restricted = bool(whitelist) and isinstance(whitelist, list)
        if restricted and func.__name__ not in whitelist:
            raise Exception("Method {} is disabled".format(func.__name__))
        return func(self, *args, **kwargs)
    return guarded
[ "def", "enabled_checker", "(", "func", ")", ":", "@", "wraps", "(", "func", ")", "def", "wrap", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "self", ".", "allowed_methods", "and", "isinstance", "(", "self", ".", "allowed_me...
Access decorator which checks if a RPC method is enabled by our configuration
[ "Access", "decorator", "which", "checks", "if", "a", "RPC", "method", "is", "enabled", "by", "our", "configuration" ]
python
train
timgabets/pynblock
pynblock/tools.py
https://github.com/timgabets/pynblock/blob/dbdb6d06bd7741e1138bed09d874b47b23d8d200/pynblock/tools.py#L149-L167
def get_clear_pin(pinblock, account_number):
    """Recover the clear PIN from a PIN block and an account number.

    ``account_number`` is the 12 right-most digits of the card account
    number, excluding the check digit.  The PIN block is XORed with the
    zero-padded account number; the first two hex digits of the result
    give the PIN length, the following digits the PIN itself.

    :raises ValueError: if the decoded length is outside 4..8 or the PIN
        contains non-numeric characters
    """
    block_bytes = bytes.fromhex(pinblock.decode('utf-8'))
    acct_bytes = bytes.fromhex((b'0000' + account_number).decode('utf-8'))
    decoded = xor(raw2B(block_bytes), raw2B(acct_bytes)).decode('utf-8')

    pin_length = int(decoded[:2], 16)
    if not (4 <= pin_length < 9):
        raise ValueError('Incorrect PIN length: {}'.format(pin_length))

    pin = decoded[2:2 + pin_length]
    try:
        int(pin)
    except ValueError:
        raise ValueError('PIN contains non-numeric characters')
    return bytes(pin, 'utf-8')
[ "def", "get_clear_pin", "(", "pinblock", ",", "account_number", ")", ":", "raw_pinblock", "=", "bytes", ".", "fromhex", "(", "pinblock", ".", "decode", "(", "'utf-8'", ")", ")", "raw_acct_num", "=", "bytes", ".", "fromhex", "(", "(", "b'0000'", "+", "accou...
Calculate the clear PIN from provided PIN block and account_number, which is the 12 right-most digits of card account number, excluding check digit
[ "Calculate", "the", "clear", "PIN", "from", "provided", "PIN", "block", "and", "account_number", "which", "is", "the", "12", "right", "-", "most", "digits", "of", "card", "account", "number", "excluding", "check", "digit" ]
python
train
PedalPi/PluginsManager
pluginsmanager/observer/mod_host/protocol_parser.py
https://github.com/PedalPi/PluginsManager/blob/2dcc9f6a79b48e9c9be82efffd855352fa15c5c7/pluginsmanager/observer/mod_host/protocol_parser.py#L188-L202
def param_get(param):
    """
    Build the ``param_get <instance_number> <param_symbol>`` message that
    asks for the current value of a plugin control.

    e.g.::

        param_get 0 gain

    :param Lv2Param param: Parameter that will be get your current value
    """
    return 'param_get {} {}'.format(param.effect.instance, param.symbol)
[ "def", "param_get", "(", "param", ")", ":", "instance", "=", "param", ".", "effect", ".", "instance", "return", "'param_get {} {}'", ".", "format", "(", "instance", ",", "param", ".", "symbol", ")" ]
``param_get <instance_number> <param_symbol>`` get the value of the request control e.g.:: param_get 0 gain :param Lv2Param param: Parameter that will be get your current value
[ "param_get", "<instance_number", ">", "<param_symbol", ">" ]
python
train
awslabs/serverless-application-model
examples/apps/lex-book-trip-python/lambda_function.py
https://github.com/awslabs/serverless-application-model/blob/cccb0c96b5c91e53355ebc07e542467303a5eedd/examples/apps/lex-book-trip-python/lambda_function.py#L116-L127
def generate_hotel_price(location, nights, room_type):
    """
    Generate a deterministic, plausible hotel price.

    The price is fixed for a given (location, room_type) pair and scales
    linearly with the number of nights.
    """
    room_types = ['queen', 'king', 'deluxe']
    lowered = location.lower()
    # per-character "cost of living" derived from the lowercased location
    cost_of_living = sum(ord(lowered[idx]) - 97 for idx in range(len(location)))
    room_premium = 100 + room_types.index(room_type.lower())
    return nights * (100 + cost_of_living + room_premium)
[ "def", "generate_hotel_price", "(", "location", ",", "nights", ",", "room_type", ")", ":", "room_types", "=", "[", "'queen'", ",", "'king'", ",", "'deluxe'", "]", "cost_of_living", "=", "0", "for", "i", "in", "range", "(", "len", "(", "location", ")", ")...
Generates a number within a reasonable range that might be expected for a hotel. The price is fixed for a pair of location and roomType.
[ "Generates", "a", "number", "within", "a", "reasonable", "range", "that", "might", "be", "expected", "for", "a", "hotel", ".", "The", "price", "is", "fixed", "for", "a", "pair", "of", "location", "and", "roomType", "." ]
python
train
TDG-Platform/cloud-harness
gbdx_cloud_harness/controller.py
https://github.com/TDG-Platform/cloud-harness/blob/1d8f972f861816b90785a484e9bec5bd4bc2f569/gbdx_cloud_harness/controller.py#L76-L83
def invoke(self):
    """
    Execute the command selected by the parsed arguments.

    Walks ``self.FUNCTION_KEYS`` and calls the handler for every key
    whose entry in ``self._arguments`` is exactly ``True``.

    :return: None or Error
    """
    for flag, handler in self.FUNCTION_KEYS.items():
        if self._arguments[flag] is True:
            handler()
[ "def", "invoke", "(", "self", ")", ":", "for", "key", "in", "self", ".", "FUNCTION_KEYS", ".", "keys", "(", ")", ":", "if", "self", ".", "_arguments", "[", "key", "]", "is", "True", ":", "self", ".", "FUNCTION_KEYS", "[", "key", "]", "(", ")" ]
Execute the command from the arguments. :return: None or Error
[ "Execute", "the", "command", "from", "the", "arguments", ".", ":", "return", ":", "None", "or", "Error" ]
python
test
AbdealiJK/pycolorname
pycolorname/color_system.py
https://github.com/AbdealiJK/pycolorname/blob/d535de3d340a1673906cb484cc4c49c87d296ec0/pycolorname/color_system.py#L105-L126
def find_closest(self, color):
    """
    Find the closest color in the system to the given rgb values.

    Distances are measured in Lab space using the CIE1976 delta-E metric;
    the first candidate with the smallest distance wins.

    :param color: Tuple of r, g, b values (scaled to 255).
    :returns: Tuple of name and rgb closest to the given color.
    """
    target_lab = convert_color(sRGBColor(*color), LabColor,
                               target_illuminant='D65')

    best_name, best_color = "", ()
    best_diff = float("inf")
    for cand_name, cand_rgb in self.items():
        cand_lab = convert_color(sRGBColor(*cand_rgb), LabColor,
                                 target_illuminant='D65')
        diff = delta_e_cie1976(target_lab, cand_lab)
        if diff < best_diff:
            best_diff = diff
            best_name = cand_name
            best_color = cand_rgb
    return best_name, best_color
[ "def", "find_closest", "(", "self", ",", "color", ")", ":", "# Find distance between colors and find name based on closest color", "rgb", "=", "sRGBColor", "(", "*", "color", ")", "lab", "=", "convert_color", "(", "rgb", ",", "LabColor", ",", "target_illuminant", "=...
Find the closest color in the system to the given rgb values. :param color: Tuple of r, g, b values (scaled to 255). :returns: Tuple of name and rgb closest to the given color.
[ "Find", "the", "closest", "color", "in", "the", "system", "to", "the", "given", "rgb", "values", "." ]
python
train
ctuning/ck
ck/kernel.py
https://github.com/ctuning/ck/blob/7e009814e975f8742790d3106340088a46223714/ck/kernel.py#L2380-L2477
def list_all_files(i):
    """
    Input:  {
              path            - top level path
              (file_name)     - search for a specific file name
              (pattern)       - return only files with this pattern
              (path_ext)      - path extension (needed for recursion)
              (limit)         - limit number of files (if directories with a large number of files)
              (number)        - current number of files
              (all)           - if 'yes' do not ignore special directories (like .cm)
              (ignore_names)  - list of names to ignore
              (ignore_symb_dirs) - if 'yes', ignore symbolically linked dirs
                                   (to avoid recursion such as in LLVM)
              (add_path)      - if 'yes', add path
            }

    Output: {
              return   - return code = 0, if successful
                                     > 0, if error
              (error)  - error text if return > 0
              list     - dictionary of all files:
                         {"file_with_full_path":{"size":.., "path":..}
              number   - number of files in a current directory (needed for recursion)
            }
    """

    count = 0
    if i.get('number', '') != '':
        count = int(i['number'])

    skip_names = i.get('ignore_names', [])
    wanted_name = i.get('file_name', '')

    limit = -1
    if i.get('limit', '') != '':
        limit = int(i['limit'])

    found = {}

    scan_all = i.get('all', '')

    rel_prefix = ''
    if i.get('path_ext', '') != '':
        rel_prefix = i['path_ext']

    top_path = i.get('path', '')
    if sys.version_info[0] < 3:
        top_path = unicode(top_path)  # noqa: F821 -- Python 2 branch only

    pattern = i.get('pattern', '')
    if pattern != '':
        import fnmatch

    symb_flag = i.get('ignore_symb_dirs', '')
    skip_symlinked = (symb_flag == 'yes')

    with_path = i.get('add_path', '')

    try:
        entries = os.listdir(top_path)
    except Exception:
        # unreadable directory: fall through with an empty result
        pass
    else:
        for entry in entries:
            full_path = os.path.join(top_path, entry)

            if scan_all == 'yes' or entry not in cfg['special_directories']:
                if len(skip_names) == 0 or entry not in skip_names:
                    if os.path.isdir(full_path):
                        # recurse, unless symlinked dirs are ignored and this is one
                        if not skip_symlinked or os.path.realpath(full_path) == full_path:
                            sub = list_all_files({
                                'path': full_path,
                                'all': scan_all,
                                'path_ext': os.path.join(rel_prefix, entry),
                                'number': str(count),
                                'ignore_names': skip_names,
                                'pattern': pattern,
                                'file_name': wanted_name,
                                'ignore_symb_dirs': symb_flag,
                                'add_path': with_path,
                                'limit': limit})
                            if sub['return'] > 0:
                                return sub
                            found.update(sub['list'])
                    else:
                        keep = True
                        if wanted_name != '' and wanted_name != entry:
                            keep = False
                        if pattern != '' and not fnmatch.fnmatch(entry, pattern):
                            keep = False
                        if keep:
                            rel_name = os.path.join(rel_prefix, entry)
                            if os.path.isfile(full_path):
                                found[rel_name] = {'size': os.stat(full_path).st_size}
                                if with_path == 'yes':
                                    found[rel_name]['path'] = top_path

            # NOTE(review): placement of the running count / limit check at
            # loop-body level reconstructed from the collapsed source.
            count = len(found)
            if limit != -1 and count >= limit:
                break

    return {'return': 0, 'list': found, 'number': str(count)}
[ "def", "list_all_files", "(", "i", ")", ":", "number", "=", "0", "if", "i", ".", "get", "(", "'number'", ",", "''", ")", "!=", "''", ":", "number", "=", "int", "(", "i", "[", "'number'", "]", ")", "inames", "=", "i", ".", "get", "(", "'ignore_n...
Input: { path - top level path (file_name) - search for a specific file name (pattern) - return only files with this pattern (path_ext) - path extension (needed for recursion) (limit) - limit number of files (if directories with a large number of files) (number) - current number of files (all) - if 'yes' do not ignore special directories (like .cm) (ignore_names) - list of names to ignore (ignore_symb_dirs) - if 'yes', ignore symbolically linked dirs (to avoid recursion such as in LLVM) (add_path) - if 'yes', add path } Output: { return - return code = 0, if successful > 0, if error (error) - error text if return > 0 list - dictionary of all files: {"file_with_full_path":{"size":.., "path":..} sizes - sizes of files (the same order) number - number of files in a current directory (needed for recursion) }
[ "Input", ":", "{", "path", "-", "top", "level", "path", "(", "file_name", ")", "-", "search", "for", "a", "specific", "file", "name", "(", "pattern", ")", "-", "return", "only", "files", "with", "this", "pattern", "(", "path_ext", ")", "-", "path", "...
python
train
mbj4668/pyang
pyang/translators/dsdl.py
https://github.com/mbj4668/pyang/blob/f2a5cc3142162e5b9ee4e18d154568d939ff63dd/pyang/translators/dsdl.py#L450-L475
def yang_to_xpath(self, xpe):
    """Transform YANG's `xpath` to a form suitable for Schematron.

    1. Prefixes are added to unprefixed local names.  Inside global
       groupings the prefix is the variable '$pref', substituted later via
       Schematron abstract patterns.
    2. '$root' is prepended to every absolute location path.
    """
    pref = "$pref:" if self.gg_level else self.prefix_stack[-1] + ":"

    # token types after which a '/' is a step separator, not the start of
    # an absolute path
    non_initial = ("DOT", "DOTDOT", "RPAREN", "RBRACKET",
                   "name", "wildcard", "prefix_test")

    pieces = []
    prev = None
    for tok in xpath_lexer.scan(xpe):
        if tok.type == "SLASH" and prev not in non_initial:
            pieces.append("$root")
        elif tok.type == "name" and ":" not in tok.value:
            pieces.append(pref)
        pieces.append(tok.value)
        if tok.type != "_whitespace":
            prev = tok.type
    return "".join(pieces)
[ "def", "yang_to_xpath", "(", "self", ",", "xpe", ")", ":", "if", "self", ".", "gg_level", ":", "pref", "=", "\"$pref:\"", "else", ":", "pref", "=", "self", ".", "prefix_stack", "[", "-", "1", "]", "+", "\":\"", "toks", "=", "xpath_lexer", ".", "scan"...
Transform YANG's `xpath` to a form suitable for Schematron. 1. Prefixes are added to unprefixed local names. Inside global groupings, the prefix is represented as the variable '$pref' which is substituted via Schematron abstract patterns. 2. '$root' is prepended to every absolute location path.
[ "Transform", "YANG", "s", "xpath", "to", "a", "form", "suitable", "for", "Schematron", "." ]
python
train
pybel/pybel
src/pybel/parser/parse_metadata.py
https://github.com/pybel/pybel/blob/c8a7a1bdae4c475fa2a8c77f3a9a5f6d79556ca0/src/pybel/parser/parse_metadata.py#L165-L188
def handle_namespace_url(self, line: str, position: int, tokens: ParseResults) -> ParseResults:
    """Handle statements like ``DEFINE NAMESPACE X AS URL "Y"``.

    :raises: RedefinedNamespaceError
    :raises: pybel.resources.exc.ResourceError
    """
    keyword = tokens['name']
    self.raise_for_redefined_namespace(line, position, keyword)

    resource_url = tokens['url']
    self.namespace_url_dict[keyword] = resource_url

    if self.skip_validation:
        return tokens

    resource = self.manager.get_or_create_namespace(resource_url)

    if isinstance(resource, dict):
        # a plain dict result marks the namespace as uncachable
        self.namespace_to_term[keyword] = resource
        self.uncachable_namespaces.add(resource_url)
    else:
        self.namespace_to_term[keyword] = self.manager.get_namespace_encoding(resource_url)

    return tokens
[ "def", "handle_namespace_url", "(", "self", ",", "line", ":", "str", ",", "position", ":", "int", ",", "tokens", ":", "ParseResults", ")", "->", "ParseResults", ":", "namespace", "=", "tokens", "[", "'name'", "]", "self", ".", "raise_for_redefined_namespace", ...
Handle statements like ``DEFINE NAMESPACE X AS URL "Y"``. :raises: RedefinedNamespaceError :raises: pybel.resources.exc.ResourceError
[ "Handle", "statements", "like", "DEFINE", "NAMESPACE", "X", "AS", "URL", "Y", "." ]
python
train
stephenmcd/django-socketio
django_socketio/events.py
https://github.com/stephenmcd/django-socketio/blob/b704f912551829a3bcf15872ba0e1baf81dea106/django_socketio/events.py#L53-L71
def send(self, request, socket, context, *args):
    """
    When an event is sent, run all relevant handlers.

    Relevant handlers are those without a channel pattern when the given
    socket is not subscribed to any particular channel, or the handlers
    with a channel pattern that matches any of the channels that the
    given socket is subscribed to.

    In the case of subscribe/unsubscribe, match the channel arg being
    sent to the channel pattern.
    """
    for handler, pattern in self.handlers:
        # a pattern-less handler fires only for sockets with no channels
        no_channel = not pattern and not socket.channels
        if self.name.endswith("subscribe") and pattern:
            # (un)subscribe events match against the channel argument
            matches = [pattern.match(args[0])]
        else:
            matches = [pattern.match(c) for c in socket.channels if pattern]
        # BUG FIX: the original used ``filter(None, matches)`` which on
        # Python 3 returns a lazy filter object that is *always* truthy,
        # so pattern handlers fired even with zero matching channels.
        # ``any(matches)`` restores the intended semantics.
        if no_channel or any(matches):
            handler(request, socket, context, *args)
[ "def", "send", "(", "self", ",", "request", ",", "socket", ",", "context", ",", "*", "args", ")", ":", "for", "handler", ",", "pattern", "in", "self", ".", "handlers", ":", "no_channel", "=", "not", "pattern", "and", "not", "socket", ".", "channels", ...
When an event is sent, run all relevant handlers. Relevant handlers are those without a channel pattern when the given socket is not subscribed to any particular channel, or the handlers with a channel pattern that matches any of the channels that the given socket is subscribed to. In the case of subscribe/unsubscribe, match the channel arg being sent to the channel pattern.
[ "When", "an", "event", "is", "sent", "run", "all", "relevant", "handlers", ".", "Relevant", "handlers", "are", "those", "without", "a", "channel", "pattern", "when", "the", "given", "socket", "is", "not", "subscribed", "to", "any", "particular", "channel", "...
python
train
Esri/ArcREST
src/arcrest/manageorg/_parameters.py
https://github.com/Esri/ArcREST/blob/ab240fde2b0200f61d4a5f6df033516e53f2f416/src/arcrest/manageorg/_parameters.py#L31-L35
def removeByIndex(self, index):
    """Remove the user at the given position from the invitation list.

    Out-of-range or negative positions are silently ignored, keeping the
    original defensive (no-op) behavior.

    :param index: zero-based position of the invite to remove
    """
    # BUG FIX: the original used ``index < len(self._invites) - 1`` (which
    # made the last entry unremovable) and ``self._invites.remove(index)``
    # (which removes by *value*, not by position).
    if 0 <= index < len(self._invites):
        del self._invites[index]
[ "def", "removeByIndex", "(", "self", ",", "index", ")", ":", "if", "index", "<", "len", "(", "self", ".", "_invites", ")", "-", "1", "and", "index", ">=", "0", ":", "self", ".", "_invites", ".", "remove", "(", "index", ")" ]
removes a user from the invitation list by position
[ "removes", "a", "user", "from", "the", "invitation", "list", "by", "position" ]
python
train
dcos/shakedown
shakedown/dcos/cluster.py
https://github.com/dcos/shakedown/blob/e2f9e2382788dbcd29bd18aa058b76e7c3b83b3e/shakedown/dcos/cluster.py#L93-L107
def __metadata_helper(json_path):
    """ Return the parsed JSON for a dcos-metadata endpoint, or None.

        dcos-metadata was introduced in DC/OS 1.9; on older clusters, or
        when the endpoint is missing, None is returned instead of raising.

        :param json_path: path fragment under ``dcos-metadata/`` to fetch
        :return: decoded JSON document, or None when unavailable
    """
    url = shakedown.dcos_url_path('dcos-metadata/{}'.format(json_path))

    try:
        response = dcos.http.request('get', url)
        if response.status_code == 200:
            return response.json()
    except Exception:
        # BUG FIX: narrowed from a bare ``except:`` so that
        # KeyboardInterrupt / SystemExit still propagate; any client or
        # transport error is treated as "metadata unavailable".
        pass

    return None
[ "def", "__metadata_helper", "(", "json_path", ")", ":", "url", "=", "shakedown", ".", "dcos_url_path", "(", "'dcos-metadata/{}'", ".", "format", "(", "json_path", ")", ")", "try", ":", "response", "=", "dcos", ".", "http", ".", "request", "(", "'get'", ","...
Returns json for specific cluster metadata. Important to realize that this was introduced in dcos-1.9. Clusters prior to 1.9 and missing metadata will return None
[ "Returns", "json", "for", "specific", "cluster", "metadata", ".", "Important", "to", "realize", "that", "this", "was", "introduced", "in", "dcos", "-", "1", ".", "9", ".", "Clusters", "prior", "to", "1", ".", "9", "and", "missing", "metadata", "will", "r...
python
train
bjmorgan/vasppy
vasppy/outcar.py
https://github.com/bjmorgan/vasppy/blob/cc2d1449697b17ee1c43715a02cddcb1139a6834/vasppy/outcar.py#L41-L53
def vasp_version_from_outcar(filename='OUTCAR'):
    """Return the stripped first line of a VASP OUTCAR file.

    The first line of an OUTCAR carries the VASP source version string.

    Args:
        filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.

    Returns:
        (Str): The first line read from the OUTCAR file.
    """
    with open(filename) as outcar:
        first_line = outcar.readline()
    return first_line.strip()
[ "def", "vasp_version_from_outcar", "(", "filename", "=", "'OUTCAR'", ")", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "line", "=", "f", ".", "readline", "(", ")", ".", "strip", "(", ")", "return", "line" ]
Returns the first line from a VASP OUTCAR file, to get the VASP source version string. Args: filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'. Returns: (Str): The first line read from the OUTCAR file.
[ "Returns", "the", "first", "line", "from", "a", "VASP", "OUTCAR", "file", "to", "get", "the", "VASP", "source", "version", "string", "." ]
python
train
mrcagney/gtfstk
gtfstk/calendar.py
https://github.com/mrcagney/gtfstk/blob/c91494e6fefc02523889655a0dc92d1c0eee8d03/gtfstk/calendar.py#L108-L120
def restrict_dates(feed: "Feed", dates: List[str]) -> List[str]:
    """
    Given a "Feed" and a date (YYYYMMDD string) or list of dates,
    coerce the date/dates into a list and drop the dates not in
    ``feed.get_dates()``, preserving the original order of ``dates``.
    Intended as a helper function.
    """
    # Coerce a single date string to a one-element list
    if isinstance(dates, str):
        dates = [dates]

    # PERF FIX: the original evaluated ``feed.get_dates()`` once per date
    # inside the comprehension.  Hoist it, and use a set for O(1) lookups.
    feed_dates = set(feed.get_dates())

    # Restrict, preserving the caller's ordering
    return [d for d in dates if d in feed_dates]
[ "def", "restrict_dates", "(", "feed", ":", "\"Feed\"", ",", "dates", ":", "List", "[", "str", "]", ")", "->", "List", "[", "str", "]", ":", "# Coerce string to set", "if", "isinstance", "(", "dates", ",", "str", ")", ":", "dates", "=", "[", "dates", ...
Given a "Feed" and a date (YYYYMMDD string) or list of dates, coerce the date/dates into a list and drop the dates not in ``feed.get_dates()``, preserving the original order of ``dates``. Intended as a helper function.
[ "Given", "a", "Feed", "and", "a", "date", "(", "YYYYMMDD", "string", ")", "or", "list", "of", "dates", "coerce", "the", "date", "/", "dates", "into", "a", "list", "and", "drop", "the", "dates", "not", "in", "feed", ".", "get_dates", "()", "preserving",...
python
train
crocs-muni/roca
roca/detect.py
https://github.com/crocs-muni/roca/blob/74ad6ce63c428d83dcffce9c5e26ef7b9e30faa5/roca/detect.py#L1808-L1840
def process_apk(self, data, name):
    """
    Processes Android application (APK): extracts the signing certificate
    and feeds it to the standard X.509 processing path.

    :param data: raw APK file contents -- passed to APK(..., raw=True),
        so presumably bytes; TODO confirm against callers
    :param name: display name of the scanned file, used in results and logs
    :return: list of test results; empty when certificate processing failed
    """
    # apk_parse is an optional dependency: report a structured error result
    # instead of crashing when it is not installed.
    try:
        from apk_parse.apk import APK
    except Exception as e:
        logger.warning('Could not import apk_parse, try running: pip install apk_parse_ph4')
        return [TestResult(fname=name, type='apk-pem-cert', error='cannot-import')]

    ret = []
    try:
        from cryptography.x509.base import load_der_x509_certificate
        # raw=True: `data` is the APK content itself, not a path
        apkf = APK(data, process_now=False, process_file_types=False, raw=True, temp_dir=self.args.tmp_dir)
        apkf.process()
        # statistics counter for processed APKs
        self.num_apk += 1

        # PEM-encoded signing certificate extracted from the APK
        pem = apkf.cert_pem

        aux = {'subtype': 'apk'}

        # Parse the certificate and run the shared X.509 analysis.
        x509 = load_der_x509_certificate(pem_to_der(pem), self.get_backend())

        sub = self.process_x509(x509, name=name, idx=0, data=data, pem=True, source='apk-pem-cert', aux=aux)
        ret.append(sub)

    except Exception as e:
        # Best-effort: log the failure and return whatever was collected.
        logger.debug('Exception in processing APK %s : %s' % (name, e))
        self.trace_logger.log(e)

    return ret
[ "def", "process_apk", "(", "self", ",", "data", ",", "name", ")", ":", "try", ":", "from", "apk_parse", ".", "apk", "import", "APK", "except", "Exception", "as", "e", ":", "logger", ".", "warning", "(", "'Could not import apk_parse, try running: pip install apk_...
Processes Android application :param data: :param name: :return:
[ "Processes", "Android", "application", ":", "param", "data", ":", ":", "param", "name", ":", ":", "return", ":" ]
python
train
myusuf3/delorean
delorean/interface.py
https://github.com/myusuf3/delorean/blob/3e8a7b8cfd4c26546f62bde2f34002893adfa08a/delorean/interface.py#L108-L113
def range_daily(start=None, stop=None, timezone='UTC', count=None):
    """
    Alternative way of generating a set of Delorean objects with DAILY
    stops; a thin convenience wrapper around :func:`stops`.
    """
    return stops(freq=DAILY, start=start, stop=stop,
                 timezone=timezone, count=count)
[ "def", "range_daily", "(", "start", "=", "None", ",", "stop", "=", "None", ",", "timezone", "=", "'UTC'", ",", "count", "=", "None", ")", ":", "return", "stops", "(", "start", "=", "start", ",", "stop", "=", "stop", ",", "freq", "=", "DAILY", ",", ...
This an alternative way to generating sets of Delorean objects with DAILY stops
[ "This", "an", "alternative", "way", "to", "generating", "sets", "of", "Delorean", "objects", "with", "DAILY", "stops" ]
python
train
glue-viz/glue-vispy-viewers
glue_vispy_viewers/extern/vispy/io/mesh.py
https://github.com/glue-viz/glue-vispy-viewers/blob/54a4351d98c1f90dfb1a557d1b447c1f57470eea/glue_vispy_viewers/extern/vispy/io/mesh.py#L13-L43
def read_mesh(fname):
    """Read mesh data from file.

    Parameters
    ----------
    fname : str
        File name to read. Format will be inferred from the filename.
        Currently only '.obj' and '.obj.gz' are supported.

    Returns
    -------
    vertices : array
        Vertices.
    faces : array | None
        Triangle face definitions.
    normals : array
        Normals for the mesh.
    texcoords : array | None
        Texture coordinates.
    """
    # Determine the format from the (possibly double, e.g. '.obj.gz')
    # extension.
    fmt = op.splitext(fname)[1].lower()
    if fmt == '.gz':
        fmt = op.splitext(op.splitext(fname)[0])[1].lower()

    if fmt == '.obj':
        # BUG FIX: was ``fmt in ('.obj')`` -- a *substring* test against
        # the string '.obj' (matched '', '.o', ...), not tuple membership.
        return WavefrontReader.read(fname)
    elif not fmt:
        # BUG FIX: was ``elif not format:`` which tested the truthy
        # builtin ``format`` function, so this branch was unreachable.
        raise ValueError('read_mesh could not determine format.')
    else:
        raise ValueError('read_mesh does not understand format %s.' % fmt)
[ "def", "read_mesh", "(", "fname", ")", ":", "# Check format", "fmt", "=", "op", ".", "splitext", "(", "fname", ")", "[", "1", "]", ".", "lower", "(", ")", "if", "fmt", "==", "'.gz'", ":", "fmt", "=", "op", ".", "splitext", "(", "op", ".", "splite...
Read mesh data from file. Parameters ---------- fname : str File name to read. Format will be inferred from the filename. Currently only '.obj' and '.obj.gz' are supported. Returns ------- vertices : array Vertices. faces : array | None Triangle face definitions. normals : array Normals for the mesh. texcoords : array | None Texture coordinates.
[ "Read", "mesh", "data", "from", "file", "." ]
python
train
openid/python-openid
openid/message.py
https://github.com/openid/python-openid/blob/f7e13536f0d1828d3cef5ae7a7b55cabadff37fc/openid/message.py#L233-L243
def setOpenIDNamespace(self, openid_ns_uri, implicit): """Set the OpenID namespace URI used in this message. @raises InvalidOpenIDNamespace: if the namespace is not in L{Message.allowed_openid_namespaces} """ if openid_ns_uri not in self.allowed_openid_namespaces: raise InvalidOpenIDNamespace(openid_ns_uri) self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit) self._openid_ns_uri = openid_ns_uri
[ "def", "setOpenIDNamespace", "(", "self", ",", "openid_ns_uri", ",", "implicit", ")", ":", "if", "openid_ns_uri", "not", "in", "self", ".", "allowed_openid_namespaces", ":", "raise", "InvalidOpenIDNamespace", "(", "openid_ns_uri", ")", "self", ".", "namespaces", "...
Set the OpenID namespace URI used in this message. @raises InvalidOpenIDNamespace: if the namespace is not in L{Message.allowed_openid_namespaces}
[ "Set", "the", "OpenID", "namespace", "URI", "used", "in", "this", "message", "." ]
python
train
toros-astro/corral
corral/qa.py
https://github.com/toros-astro/corral/blob/75474b38ff366330d33644461a902d07374a5bbc/corral/qa.py#L380-L403
def default_qai(qareport): """QAI = 2 * (TP * (PT/PNC) * COV) / (1 + exp(MSE/tau)) Where: TP: If all tests passes is 1, 0 otherwise. PT: Processors and commands tested. PCN: The number number of processors (Loader, Steps and Alerts) and commands. COV: The code coverage (between 0 and 1). MSE: The Maintainability and Style Errors. tau: Tolerance of style errors per file """ TP = 1. if qareport.is_test_sucess else 0. PCN = qareport.processors_number + qareport.commands_number PT_div_PCN = float(qareport.pc_tested_number) / PCN COV = qareport.coverage_line_rate tau = get_tau() total_tau = float(tau) * len(qareport.project_modules) style = 1 + math.exp(qareport.style_errors / total_tau) result = (2 * TP * PT_div_PCN * COV) / style return result
[ "def", "default_qai", "(", "qareport", ")", ":", "TP", "=", "1.", "if", "qareport", ".", "is_test_sucess", "else", "0.", "PCN", "=", "qareport", ".", "processors_number", "+", "qareport", ".", "commands_number", "PT_div_PCN", "=", "float", "(", "qareport", "...
QAI = 2 * (TP * (PT/PNC) * COV) / (1 + exp(MSE/tau)) Where: TP: If all tests passes is 1, 0 otherwise. PT: Processors and commands tested. PCN: The number number of processors (Loader, Steps and Alerts) and commands. COV: The code coverage (between 0 and 1). MSE: The Maintainability and Style Errors. tau: Tolerance of style errors per file
[ "QAI", "=", "2", "*", "(", "TP", "*", "(", "PT", "/", "PNC", ")", "*", "COV", ")", "/", "(", "1", "+", "exp", "(", "MSE", "/", "tau", "))" ]
python
train
jaredLunde/redis_structures
redis_structures/__init__.py
https://github.com/jaredLunde/redis_structures/blob/b9cce5f5c85db5e12c292633ff8d04e3ae053294/redis_structures/__init__.py#L738-L745
def incr(self, key, by=1): """ :see::meth:RedisMap.incr """ pipe = self._client.pipeline(transaction=False) pipe.incr(self.get_key(key), by) if key not in self: pipe.hincrby(self._bucket_key, self.key_prefix, 1) result = pipe.execute() return result[0]
[ "def", "incr", "(", "self", ",", "key", ",", "by", "=", "1", ")", ":", "pipe", "=", "self", ".", "_client", ".", "pipeline", "(", "transaction", "=", "False", ")", "pipe", ".", "incr", "(", "self", ".", "get_key", "(", "key", ")", ",", "by", ")...
:see::meth:RedisMap.incr
[ ":", "see", "::", "meth", ":", "RedisMap", ".", "incr" ]
python
train
TkTech/Jawa
jawa/attribute.py
https://github.com/TkTech/Jawa/blob/94c8424e699029ac33fbc0e866fff0ecb2742289/jawa/attribute.py#L103-L122
def pack(self, out: IO): """ Write the AttributeTable to the file-like object `out`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when saving a ClassFile. :param out: Any file-like object providing `write()` """ out.write(pack('>H', len(self._table))) for attribute in self: info = attribute.pack() out.write(pack( '>HI', attribute.name.index, len(info) )) out.write(info)
[ "def", "pack", "(", "self", ",", "out", ":", "IO", ")", ":", "out", ".", "write", "(", "pack", "(", "'>H'", ",", "len", "(", "self", ".", "_table", ")", ")", ")", "for", "attribute", "in", "self", ":", "info", "=", "attribute", ".", "pack", "("...
Write the AttributeTable to the file-like object `out`. .. note:: Advanced usage only. You will typically never need to call this method as it will be called for you when saving a ClassFile. :param out: Any file-like object providing `write()`
[ "Write", "the", "AttributeTable", "to", "the", "file", "-", "like", "object", "out", "." ]
python
train
Equitable/trump
trump/orm.py
https://github.com/Equitable/trump/blob/a2802692bc642fa32096374159eea7ceca2947b4/trump/orm.py#L1436-L1450
def _all_datatable_data(self): """ Returns ------- A list of tuples representing rows from all columns of the datatable, sorted accordingly. """ dtbl = self.datatable objs = object_session(self) imcols = [dtbl.c.indx, dtbl.c.final, dtbl.c.override_feed000, dtbl.c.failsafe_feed999] cols = imcols[:3] + [c for c in dtbl.c if c not in (imcols)] + [imcols[3]] if isinstance(dtbl, Table): return objs.query(*cols).order_by(dtbl.c.indx).all() else: raise Exception("Symbol has no datatable")
[ "def", "_all_datatable_data", "(", "self", ")", ":", "dtbl", "=", "self", ".", "datatable", "objs", "=", "object_session", "(", "self", ")", "imcols", "=", "[", "dtbl", ".", "c", ".", "indx", ",", "dtbl", ".", "c", ".", "final", ",", "dtbl", ".", "...
Returns ------- A list of tuples representing rows from all columns of the datatable, sorted accordingly.
[ "Returns", "-------", "A", "list", "of", "tuples", "representing", "rows", "from", "all", "columns", "of", "the", "datatable", "sorted", "accordingly", "." ]
python
train
daboth/pagan
pagan/generator.py
https://github.com/daboth/pagan/blob/1e6d31f78e312d242751e70566ca9a6278784915/pagan/generator.py#L134-L140
def create_torso_layer(aspect, ip): '''Reads the TORSO.pgn file and creates the torso layer.''' layer = [] if 'TOP' in aspect: layer = pgnreader.parse_pagan_file(FILE_TORSO, ip, invert=False, sym=True) return layer
[ "def", "create_torso_layer", "(", "aspect", ",", "ip", ")", ":", "layer", "=", "[", "]", "if", "'TOP'", "in", "aspect", ":", "layer", "=", "pgnreader", ".", "parse_pagan_file", "(", "FILE_TORSO", ",", "ip", ",", "invert", "=", "False", ",", "sym", "=",...
Reads the TORSO.pgn file and creates the torso layer.
[ "Reads", "the", "TORSO", ".", "pgn", "file", "and", "creates", "the", "torso", "layer", "." ]
python
train
guaix-ucm/numina
numina/core/recipeinout.py
https://github.com/guaix-ucm/numina/blob/6c829495df8937f77c2de9383c1038ffb3e713e3/numina/core/recipeinout.py#L44-L61
def _finalize(self, all_msg_errors=None): """Access all the instance descriptors This wil trigger an exception if a required parameter is not set """ if all_msg_errors is None: all_msg_errors = [] for key in self.stored(): try: getattr(self, key) except (ValueError, TypeError) as err: all_msg_errors.append(err.args[0]) # Raises a list of all the missing entries if all_msg_errors: raise ValueError(all_msg_errors)
[ "def", "_finalize", "(", "self", ",", "all_msg_errors", "=", "None", ")", ":", "if", "all_msg_errors", "is", "None", ":", "all_msg_errors", "=", "[", "]", "for", "key", "in", "self", ".", "stored", "(", ")", ":", "try", ":", "getattr", "(", "self", "...
Access all the instance descriptors This wil trigger an exception if a required parameter is not set
[ "Access", "all", "the", "instance", "descriptors" ]
python
train
Shizmob/pydle
pydle/client.py
https://github.com/Shizmob/pydle/blob/7ec7d65d097318ed0bcdc5d8401470287d8c7cf7/pydle/client.py#L426-L429
async def on_unknown(self, message): """ Unknown command. """ self.logger.warning('Unknown command: [%s] %s %s', message.source, message.command, message.params)
[ "async", "def", "on_unknown", "(", "self", ",", "message", ")", ":", "self", ".", "logger", ".", "warning", "(", "'Unknown command: [%s] %s %s'", ",", "message", ".", "source", ",", "message", ".", "command", ",", "message", ".", "params", ")" ]
Unknown command.
[ "Unknown", "command", "." ]
python
train
opendatateam/udata
udata/harvest/actions.py
https://github.com/opendatateam/udata/blob/f016585af94b0ff6bd73738c700324adc8ba7f8f/udata/harvest/actions.py#L236-L276
def attach(domain, filename): '''Attach existing dataset to their harvest remote id before harvesting. The expected csv file format is the following: - a column with header "local" and the local IDs or slugs - a column with header "remote" and the remote IDs The delimiter should be ";". columns order and extras columns does not matter ''' count = 0 errors = 0 with open(filename) as csvfile: reader = csv.DictReader(csvfile, delimiter=b';', quotechar=b'"') for row in reader: try: dataset = Dataset.objects.get(id=ObjectId(row['local'])) except: # noqa (Never stop on failure) log.warning('Unable to attach dataset : %s', row['local']) errors += 1 continue # Detach previously attached dataset Dataset.objects(**{ 'extras__harvest:domain': domain, 'extras__harvest:remote_id': row['remote'] }).update(**{ 'unset__extras__harvest:domain': True, 'unset__extras__harvest:remote_id': True }) dataset.extras['harvest:domain'] = domain dataset.extras['harvest:remote_id'] = row['remote'] dataset.last_modified = datetime.now() dataset.save() count += 1 return AttachResult(count, errors)
[ "def", "attach", "(", "domain", ",", "filename", ")", ":", "count", "=", "0", "errors", "=", "0", "with", "open", "(", "filename", ")", "as", "csvfile", ":", "reader", "=", "csv", ".", "DictReader", "(", "csvfile", ",", "delimiter", "=", "b';'", ",",...
Attach existing dataset to their harvest remote id before harvesting. The expected csv file format is the following: - a column with header "local" and the local IDs or slugs - a column with header "remote" and the remote IDs The delimiter should be ";". columns order and extras columns does not matter
[ "Attach", "existing", "dataset", "to", "their", "harvest", "remote", "id", "before", "harvesting", "." ]
python
train
apache/airflow
airflow/www/decorators.py
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/www/decorators.py#L97-L127
def has_dag_access(**dag_kwargs): """ Decorator to check whether the user has read / write permission on the dag. """ def decorator(f): @functools.wraps(f) def wrapper(self, *args, **kwargs): has_access = self.appbuilder.sm.has_access dag_id = request.args.get('dag_id') # if it is false, we need to check whether user has write access on the dag can_dag_edit = dag_kwargs.get('can_dag_edit', False) # 1. check whether the user has can_dag_edit permissions on all_dags # 2. if 1 false, check whether the user # has can_dag_edit permissions on the dag # 3. if 2 false, check whether it is can_dag_read view, # and whether user has the permissions if ( has_access('can_dag_edit', 'all_dags') or has_access('can_dag_edit', dag_id) or (not can_dag_edit and (has_access('can_dag_read', 'all_dags') or has_access('can_dag_read', dag_id)))): return f(self, *args, **kwargs) else: flash("Access is Denied", "danger") return redirect(url_for(self.appbuilder.sm.auth_view. __class__.__name__ + ".login")) return wrapper return decorator
[ "def", "has_dag_access", "(", "*", "*", "dag_kwargs", ")", ":", "def", "decorator", "(", "f", ")", ":", "@", "functools", ".", "wraps", "(", "f", ")", "def", "wrapper", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "has_access",...
Decorator to check whether the user has read / write permission on the dag.
[ "Decorator", "to", "check", "whether", "the", "user", "has", "read", "/", "write", "permission", "on", "the", "dag", "." ]
python
test
raamana/mrivis
mrivis/utils.py
https://github.com/raamana/mrivis/blob/199ad096b8a1d825f69109e7218a81b2f1cec756/mrivis/utils.py#L264-L299
def scale_0to1(image_in, exclude_outliers_below=False, exclude_outliers_above=False): """Scale the two images to [0, 1] based on min/max from both. Parameters ----------- image_in : ndarray Input image exclude_outliers_{below,above} : float Lower/upper limit, a value between 0 and 100. Returns ------- scaled_image : ndarray clipped and/or scaled image """ min_value = image_in.min() max_value = image_in.max() # making a copy to ensure no side-effects image = image_in.copy() if exclude_outliers_below: perctl = float(exclude_outliers_below) image[image < np.percentile(image, perctl)] = min_value if exclude_outliers_above: perctl = float(exclude_outliers_above) image[image > np.percentile(image, 100.0 - perctl)] = max_value image = (image - min_value) / (max_value - min_value) return image
[ "def", "scale_0to1", "(", "image_in", ",", "exclude_outliers_below", "=", "False", ",", "exclude_outliers_above", "=", "False", ")", ":", "min_value", "=", "image_in", ".", "min", "(", ")", "max_value", "=", "image_in", ".", "max", "(", ")", "# making a copy t...
Scale the two images to [0, 1] based on min/max from both. Parameters ----------- image_in : ndarray Input image exclude_outliers_{below,above} : float Lower/upper limit, a value between 0 and 100. Returns ------- scaled_image : ndarray clipped and/or scaled image
[ "Scale", "the", "two", "images", "to", "[", "0", "1", "]", "based", "on", "min", "/", "max", "from", "both", "." ]
python
train
jobovy/galpy
galpy/orbit/OrbitTop.py
https://github.com/jobovy/galpy/blob/9c5b9fe65d58835624dffe432be282060918ee08/galpy/orbit/OrbitTop.py#L643-L666
def pmll(self,*args,**kwargs): """ NAME: pmll PURPOSE: return proper motion in Galactic longitude (in mas/yr) INPUT: t - (optional) time at which to get pmll obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer (in kpc and km/s) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) vo= velocity in km/s corresponding to v=1. (default=Object-wide default) OUTPUT: pm_l(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU) """ _check_roSet(self,kwargs,'pmll') _check_voSet(self,kwargs,'pmll') lbdvrpmllpmbb= self._lbdvrpmllpmbb(*args,**kwargs) return lbdvrpmllpmbb[:,4]
[ "def", "pmll", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "_check_roSet", "(", "self", ",", "kwargs", ",", "'pmll'", ")", "_check_voSet", "(", "self", ",", "kwargs", ",", "'pmll'", ")", "lbdvrpmllpmbb", "=", "self", ".", "_lbdv...
NAME: pmll PURPOSE: return proper motion in Galactic longitude (in mas/yr) INPUT: t - (optional) time at which to get pmll obs=[X,Y,Z,vx,vy,vz] - (optional) position and velocity of observer (in kpc and km/s) (default=Object-wide default) OR Orbit object that corresponds to the orbit of the observer Y is ignored and always assumed to be zero ro= distance in kpc corresponding to R=1. (default=Object-wide default) vo= velocity in km/s corresponding to v=1. (default=Object-wide default) OUTPUT: pm_l(t) in mas/yr HISTORY: 2011-02-24 - Written - Bovy (NYU)
[ "NAME", ":", "pmll", "PURPOSE", ":", "return", "proper", "motion", "in", "Galactic", "longitude", "(", "in", "mas", "/", "yr", ")", "INPUT", ":", "t", "-", "(", "optional", ")", "time", "at", "which", "to", "get", "pmll", "obs", "=", "[", "X", "Y",...
python
train
Oneiroe/PySimpleAutomata
PySimpleAutomata/DFA.py
https://github.com/Oneiroe/PySimpleAutomata/blob/0f9f2705fd8ddd5d8118bc31552a640f5d00c359/PySimpleAutomata/DFA.py#L466-L544
def dfa_projection(dfa: dict, symbols_to_remove: set) -> dict: """ Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \{s | (s_0 , s) ∈ ε_X \}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA. """ nfa = { 'alphabet': dfa['alphabet'].difference(symbols_to_remove), 'states': dfa['states'].copy(), 'initial_states': {dfa['initial_state']}, 'accepting_states': dfa['accepting_states'].copy(), 'transitions': dict() } current_nfa_transitions = None current_e_x = None e_x = dict() # equivalence relation dictionary # while no more changes are possible while current_nfa_transitions != nfa['transitions'] or current_e_x != e_x: current_nfa_transitions = nfa['transitions'].copy() current_e_x = deepcopy(e_x) for (state, a) in dfa['transitions']: next_state = dfa['transitions'][state, a] if a in symbols_to_remove: # mark next_state as equivalent to state e_x.setdefault(state, set()).add(next_state) app_set = set() for equivalent in e_x[state]: # mark states equivalent to next_states also to state if equivalent in e_x: app_set.update(e_x[equivalent]) # add all transitions of equivalent states to state for act in nfa['alphabet']: if (equivalent, act) in dfa['transitions']: equivalent_next = dfa['transitions'][ equivalent, act] nfa['transitions'].setdefault( (state, act), set()).add(equivalent_next) # if equivalent_next has equivalent states 
if equivalent_next in e_x: # the transition leads also to these states nfa['transitions'][state, act].update( e_x[equivalent_next]) e_x[state].update(app_set) else: # add the transition to the NFA nfa['transitions'].setdefault((state, a), set()).add( next_state) # if next_state has equivalent states if next_state in e_x: # the same transition arrive also to all these other states nfa['transitions'][state, a].update(e_x[next_state]) # Add all state equivalent to the initial one to NFA initial states set if dfa['initial_state'] in e_x: nfa['initial_states'].update(e_x[dfa['initial_state']]) return nfa
[ "def", "dfa_projection", "(", "dfa", ":", "dict", ",", "symbols_to_remove", ":", "set", ")", "->", "dict", ":", "nfa", "=", "{", "'alphabet'", ":", "dfa", "[", "'alphabet'", "]", ".", "difference", "(", "symbols_to_remove", ")", ",", "'states'", ":", "df...
Returns a NFA that reads the language recognized by the input DFA where all the symbols in **symbols_to_project** are projected out of the alphabet. Projection in a DFA is the operation that existentially removes from a word all occurrence of symbols in a set X. Given a dfa :math:`A = (Σ, S, s_0 , ρ, F )`, we can define an NFA :math:`A_{πX}` that recognizes the language :math:`πX(L(A))` as :math:`A_{πX}= ( Σ−X, S, S_0 , ρ_X , F )` where • :math:`S_0 = \{s | (s_0 , s) ∈ ε_X \}` • :math:`(s,a,s_y ) ∈ ρ_X` iff there exist :math:`(t, t_y)` s.t. :math:`(s,t) ∈ ε_X , t_y = ρ(t,a)` and :math:`(t_y , s_y ) ∈ ε_X` :param dict dfa: input DFA; :param set symbols_to_remove: set containing symbols ∈ dfa[ 'alphabet'] to be projected out from DFA. :return: *(dict)* representing a NFA.
[ "Returns", "a", "NFA", "that", "reads", "the", "language", "recognized", "by", "the", "input", "DFA", "where", "all", "the", "symbols", "in", "**", "symbols_to_project", "**", "are", "projected", "out", "of", "the", "alphabet", "." ]
python
train
Parsely/birding
src/birding/spout.py
https://github.com/Parsely/birding/blob/c7f6eee56424234e361b1a455595de202e744dac/src/birding/spout.py#L11-L15
def DispatchSpout(*a, **kw): """Factory to dispatch spout class based on config.""" spout_class_name = get_config()['Spout'] spout_class = import_name(spout_class_name, default_ns='birding.spout') return spout_class(*a, **kw)
[ "def", "DispatchSpout", "(", "*", "a", ",", "*", "*", "kw", ")", ":", "spout_class_name", "=", "get_config", "(", ")", "[", "'Spout'", "]", "spout_class", "=", "import_name", "(", "spout_class_name", ",", "default_ns", "=", "'birding.spout'", ")", "return", ...
Factory to dispatch spout class based on config.
[ "Factory", "to", "dispatch", "spout", "class", "based", "on", "config", "." ]
python
train
tomplus/kubernetes_asyncio
kubernetes_asyncio/config/kube_config.py
https://github.com/tomplus/kubernetes_asyncio/blob/f9ab15317ec921409714c7afef11aeb0f579985d/kubernetes_asyncio/config/kube_config.py#L114-L125
def as_data(self): """If obj[%data_key_name] exists, Return obj[%data_key_name] otherwise base64 encoded string of obj[%file_key_name] file content.""" use_file_if_no_data = not self._data and self._file if use_file_if_no_data: with open(self._file) as f: if self._base64_file_content: self._data = bytes.decode( base64.standard_b64encode(str.encode(f.read()))) else: self._data = f.read() return self._data
[ "def", "as_data", "(", "self", ")", ":", "use_file_if_no_data", "=", "not", "self", ".", "_data", "and", "self", ".", "_file", "if", "use_file_if_no_data", ":", "with", "open", "(", "self", ".", "_file", ")", "as", "f", ":", "if", "self", ".", "_base64...
If obj[%data_key_name] exists, Return obj[%data_key_name] otherwise base64 encoded string of obj[%file_key_name] file content.
[ "If", "obj", "[", "%data_key_name", "]", "exists", "Return", "obj", "[", "%data_key_name", "]", "otherwise", "base64", "encoded", "string", "of", "obj", "[", "%file_key_name", "]", "file", "content", "." ]
python
train
posativ/isso
isso/wsgi.py
https://github.com/posativ/isso/blob/78997f491044b7d694ac7170edc32030544095b7/isso/wsgi.py#L27-L47
def host(environ): # pragma: no cover """ Reconstruct host from environment. A modified version of http://www.python.org/dev/peps/pep-0333/#url-reconstruction """ url = environ['wsgi.url_scheme'] + '://' if environ.get('HTTP_HOST'): url += environ['HTTP_HOST'] else: url += environ['SERVER_NAME'] if environ['wsgi.url_scheme'] == 'https': if environ['SERVER_PORT'] != '443': url += ':' + environ['SERVER_PORT'] else: if environ['SERVER_PORT'] != '80': url += ':' + environ['SERVER_PORT'] return url + quote(environ.get('SCRIPT_NAME', ''))
[ "def", "host", "(", "environ", ")", ":", "# pragma: no cover", "url", "=", "environ", "[", "'wsgi.url_scheme'", "]", "+", "'://'", "if", "environ", ".", "get", "(", "'HTTP_HOST'", ")", ":", "url", "+=", "environ", "[", "'HTTP_HOST'", "]", "else", ":", "u...
Reconstruct host from environment. A modified version of http://www.python.org/dev/peps/pep-0333/#url-reconstruction
[ "Reconstruct", "host", "from", "environment", ".", "A", "modified", "version", "of", "http", ":", "//", "www", ".", "python", ".", "org", "/", "dev", "/", "peps", "/", "pep", "-", "0333", "/", "#url", "-", "reconstruction" ]
python
train
aegirhall/console-menu
consolemenu/console_menu.py
https://github.com/aegirhall/console-menu/blob/1a28959d6f1dd6ac79c87b11efd8529d05532422/consolemenu/console_menu.py#L116-L130
def remove_item(self, item): """ Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise. """ for idx, _item in enumerate(self.items): if item == _item: del self.items[idx] return True return False
[ "def", "remove_item", "(", "self", ",", "item", ")", ":", "for", "idx", ",", "_item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "if", "item", "==", "_item", ":", "del", "self", ".", "items", "[", "idx", "]", "return", "True", "return"...
Remove the specified item from the menu. Args: item (MenuItem): the item to be removed. Returns: bool: True if the item was removed; False otherwise.
[ "Remove", "the", "specified", "item", "from", "the", "menu", "." ]
python
train
bufferapp/pipub
pipub/cli.py
https://github.com/bufferapp/pipub/blob/1270b2cc3b72ddbe57874757dcf5537d3d36e189/pipub/cli.py#L6-L12
def standard_input(): """Generator that yields lines from standard input.""" with click.get_text_stream("stdin") as stdin: while stdin.readable(): line = stdin.readline() if line: yield line.strip().encode("utf-8")
[ "def", "standard_input", "(", ")", ":", "with", "click", ".", "get_text_stream", "(", "\"stdin\"", ")", "as", "stdin", ":", "while", "stdin", ".", "readable", "(", ")", ":", "line", "=", "stdin", ".", "readline", "(", ")", "if", "line", ":", "yield", ...
Generator that yields lines from standard input.
[ "Generator", "that", "yields", "lines", "from", "standard", "input", "." ]
python
train