repo
stringlengths
7
54
path
stringlengths
4
192
url
stringlengths
87
284
code
stringlengths
78
104k
code_tokens
list
docstring
stringlengths
1
46.9k
docstring_tokens
list
language
stringclasses
1 value
partition
stringclasses
3 values
frmdstryr/enamlx
enamlx/qt/qt_graphics_view.py
https://github.com/frmdstryr/enamlx/blob/9582e29c88dc0c0340f912b49168b7307a47ed4f/enamlx/qt/qt_graphics_view.py#L653-L658
def mouseReleaseEvent(self, event): """ Handle the mouse release event for the drag operation. """ self.declaration.mouse_release_event(event) super(QtGraphicsView, self).mouseReleaseEvent(event)
[ "def", "mouseReleaseEvent", "(", "self", ",", "event", ")", ":", "self", ".", "declaration", ".", "mouse_release_event", "(", "event", ")", "super", "(", "QtGraphicsView", ",", "self", ")", ".", "mouseReleaseEvent", "(", "event", ")" ]
Handle the mouse release event for the drag operation.
[ "Handle", "the", "mouse", "release", "event", "for", "the", "drag", "operation", "." ]
python
train
neo4j-drivers/neobolt
neobolt/impl/python/direct.py
https://github.com/neo4j-drivers/neobolt/blob/724569d76e85777c4f5e30e8d0a18116bda4d8cd/neobolt/impl/python/direct.py#L341-L381
def _fetch(self): """ Receive at least one message from the server, if available. :return: 2-tuple of number of detail messages and number of summary messages fetched """ if self.closed(): raise self.Error("Failed to read from closed connection {!r}".format(self.server.address)) if self.defunct(): raise self.Error("Failed to read from defunct connection {!r}".format(self.server.address)) if not self.responses: return 0, 0 self._receive() details, summary_signature, summary_metadata = self._unpack() if details: log_debug("[#%04X] S: RECORD * %d", self.local_port, len(details)) # TODO self.responses[0].on_records(details) if summary_signature is None: return len(details), 0 response = self.responses.popleft() response.complete = True if summary_signature == b"\x70": log_debug("[#%04X] S: SUCCESS %r", self.local_port, summary_metadata) response.on_success(summary_metadata or {}) elif summary_signature == b"\x7E": self._last_run_statement = None log_debug("[#%04X] S: IGNORED", self.local_port) response.on_ignored(summary_metadata or {}) elif summary_signature == b"\x7F": self._last_run_statement = None log_debug("[#%04X] S: FAILURE %r", self.local_port, summary_metadata) response.on_failure(summary_metadata or {}) else: self._last_run_statement = None raise ProtocolError("Unexpected response message with signature %02X" % summary_signature) return len(details), 1
[ "def", "_fetch", "(", "self", ")", ":", "if", "self", ".", "closed", "(", ")", ":", "raise", "self", ".", "Error", "(", "\"Failed to read from closed connection {!r}\"", ".", "format", "(", "self", ".", "server", ".", "address", ")", ")", "if", "self", "...
Receive at least one message from the server, if available. :return: 2-tuple of number of detail messages and number of summary messages fetched
[ "Receive", "at", "least", "one", "message", "from", "the", "server", "if", "available", "." ]
python
train
mikedh/trimesh
trimesh/grouping.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/grouping.py#L529-L555
def boolean_rows(a, b, operation=np.intersect1d): """ Find the rows in two arrays which occur in both rows. Parameters --------- a: (n, d) int Array with row vectors b: (m, d) int Array with row vectors operation : function Numpy boolean set operation function: -np.intersect1d -np.setdiff1d Returns -------- shared: (p, d) array containing rows in both a and b """ a = np.asanyarray(a, dtype=np.int64) b = np.asanyarray(b, dtype=np.int64) av = a.view([('', a.dtype)] * a.shape[1]).ravel() bv = b.view([('', b.dtype)] * b.shape[1]).ravel() shared = operation(av, bv).view(a.dtype).reshape(-1, a.shape[1]) return shared
[ "def", "boolean_rows", "(", "a", ",", "b", ",", "operation", "=", "np", ".", "intersect1d", ")", ":", "a", "=", "np", ".", "asanyarray", "(", "a", ",", "dtype", "=", "np", ".", "int64", ")", "b", "=", "np", ".", "asanyarray", "(", "b", ",", "dt...
Find the rows in two arrays which occur in both rows. Parameters --------- a: (n, d) int Array with row vectors b: (m, d) int Array with row vectors operation : function Numpy boolean set operation function: -np.intersect1d -np.setdiff1d Returns -------- shared: (p, d) array containing rows in both a and b
[ "Find", "the", "rows", "in", "two", "arrays", "which", "occur", "in", "both", "rows", "." ]
python
train
luckydonald/pytgbot
code_generation/output/pytgbot/api_types/receivable/peer.py
https://github.com/luckydonald/pytgbot/blob/67f4b5a1510d4583d40b5477e876b1ef0eb8971b/code_generation/output/pytgbot/api_types/receivable/peer.py#L327-L369
def to_array(self): """ Serializes this Chat to a dictionary. :return: dictionary representation of this object. :rtype: dict """ array = super(Chat, self).to_array() array['id'] = int(self.id) # type int array['type'] = u(self.type) # py2: type unicode, py3: type str if self.title is not None: array['title'] = u(self.title) # py2: type unicode, py3: type str if self.username is not None: array['username'] = u(self.username) # py2: type unicode, py3: type str if self.first_name is not None: array['first_name'] = u(self.first_name) # py2: type unicode, py3: type str if self.last_name is not None: array['last_name'] = u(self.last_name) # py2: type unicode, py3: type str if self.all_members_are_administrators is not None: array['all_members_are_administrators'] = bool(self.all_members_are_administrators) # type bool if self.photo is not None: array['photo'] = self.photo.to_array() # type ChatPhoto if self.description is not None: array['description'] = u(self.description) # py2: type unicode, py3: type str if self.invite_link is not None: array['invite_link'] = u(self.invite_link) # py2: type unicode, py3: type str if self.pinned_message is not None: array['pinned_message'] = self.pinned_message.to_array() # type Message if self.sticker_set_name is not None: array['sticker_set_name'] = u(self.sticker_set_name) # py2: type unicode, py3: type str if self.can_set_sticker_set is not None: array['can_set_sticker_set'] = bool(self.can_set_sticker_set) # type bool return array
[ "def", "to_array", "(", "self", ")", ":", "array", "=", "super", "(", "Chat", ",", "self", ")", ".", "to_array", "(", ")", "array", "[", "'id'", "]", "=", "int", "(", "self", ".", "id", ")", "# type int", "array", "[", "'type'", "]", "=", "u", ...
Serializes this Chat to a dictionary. :return: dictionary representation of this object. :rtype: dict
[ "Serializes", "this", "Chat", "to", "a", "dictionary", "." ]
python
train
tgbugs/pyontutils
ilxutils/ilxutils/interlex_sql.py
https://github.com/tgbugs/pyontutils/blob/3d913db29c177db39151592909a4f56170ef8b35/ilxutils/ilxutils/interlex_sql.py#L218-L240
def get_terms_complete(self) -> pd.DataFrame: ''' Gets complete entity data like term/view ''' if not self.terms_complete.empty: return self.terms_complete if self.from_backup: self.terms_complete = open_pickle(TERMS_COMPLETE_BACKUP_PATH) return self.terms_complete ilx2synonyms = self.get_ilx2synonyms() ilx2existing_ids = self.get_ilx2existing_ids() ilx2annotations = self.get_ilx2annotations() ilx2superclass = self.get_ilx2superclass() ilx_complete = [] header = ['Index'] + list(self.fetch_terms().columns) for row in self.fetch_terms().itertuples(): row = {header[i]:val for i, val in enumerate(row)} row['synonyms'] = ilx2synonyms.get(row['ilx']) row['existing_ids'] = ilx2existing_ids[row['ilx']] # if breaks we have worse problems row['annotations'] = ilx2annotations.get(row['ilx']) row['superclass'] = ilx2superclass.get(row['ilx']) ilx_complete.append(row) terms_complete = pd.DataFrame(ilx_complete) create_pickle(terms_complete, TERMS_COMPLETE_BACKUP_PATH) return terms_complete
[ "def", "get_terms_complete", "(", "self", ")", "->", "pd", ".", "DataFrame", ":", "if", "not", "self", ".", "terms_complete", ".", "empty", ":", "return", "self", ".", "terms_complete", "if", "self", ".", "from_backup", ":", "self", ".", "terms_complete", ...
Gets complete entity data like term/view
[ "Gets", "complete", "entity", "data", "like", "term", "/", "view" ]
python
train
ASKIDA/Selenium2LibraryExtension
src/Selenium2LibraryExtension/keywords/__init__.py
https://github.com/ASKIDA/Selenium2LibraryExtension/blob/5ca3fa776063c6046dff317cb2575e4772d7541f/src/Selenium2LibraryExtension/keywords/__init__.py#L71-L81
def wait_until_element_value_contains(self, locator, expected, timeout=None): """Waits until the element identified by `locator` contains the expected value. You might want to use `Element Value Should Contain` instead. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected value | Slim Shady | | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |""" self._info("Waiting for '%s' value to contain '%s'" % (locator, expected)) self._wait_until_no_error(timeout, self._check_element_value_exp, True, locator, expected, False, timeout)
[ "def", "wait_until_element_value_contains", "(", "self", ",", "locator", ",", "expected", ",", "timeout", "=", "None", ")", ":", "self", ".", "_info", "(", "\"Waiting for '%s' value to contain '%s'\"", "%", "(", "locator", ",", "expected", ")", ")", "self", ".",...
Waits until the element identified by `locator` contains the expected value. You might want to use `Element Value Should Contain` instead. | *Argument* | *Description* | *Example* | | locator | Selenium 2 element locator | id=my_id | | expected | expected value | Slim Shady | | timeout | maximum time to wait before the function throws an element not found error (default=None) | 5s |
[ "Waits", "until", "the", "element", "identified", "by", "locator", "contains", "the", "expected", "value", ".", "You", "might", "want", "to", "use", "Element", "Value", "Should", "Contain", "instead", "." ]
python
train
MarcoFavorito/flloat
flloat/parser/ltlf.py
https://github.com/MarcoFavorito/flloat/blob/5e6de1bea444b68d46d288834031860a8b2f8c2d/flloat/parser/ltlf.py#L77-L127
def p_formula(self, p): """formula : formula EQUIVALENCE formula | formula IMPLIES formula | formula OR formula | formula AND formula | formula UNTIL formula | formula RELEASE formula | EVENTUALLY formula | ALWAYS formula | NEXT formula | WEAK_NEXT formula | NOT formula | TRUE | FALSE | ATOM""" if len(p) == 2: if p[1] == Symbols.TRUE.value: p[0] = LTLfTrue() elif p[1] == Symbols.FALSE.value: p[0] = LTLfFalse() else: p[0] = LTLfAtomic(Symbol(p[1])) elif len(p) == 3: if p[1] == Symbols.NEXT.value: p[0] = LTLfNext(p[2]) elif p[1] == Symbols.WEAK_NEXT.value: p[0] = LTLfWeakNext(p[2]) elif p[1] == Symbols.EVENTUALLY.value: p[0] = LTLfEventually(p[2]) elif p[1] == Symbols.ALWAYS.value: p[0] = LTLfAlways(p[2]) elif p[1] == Symbols.NOT.value: p[0] = LTLfNot(p[2]) elif len(p) == 4: l, o, r = p[1:] if o == Symbols.EQUIVALENCE.value: p[0] = LTLfEquivalence([l, r]) elif o == Symbols.IMPLIES.value: p[0] = LTLfImplies([l, r]) elif o == Symbols.OR.value: p[0] = LTLfOr([l, r]) elif o == Symbols.AND.value: p[0] = LTLfAnd([l, r]) elif o == Symbols.UNTIL.value: p[0] = LTLfUntil([l, r]) elif o == Symbols.RELEASE.value: p[0] = LTLfRelease([l, r]) else: raise ValueError else: raise ValueError
[ "def", "p_formula", "(", "self", ",", "p", ")", ":", "if", "len", "(", "p", ")", "==", "2", ":", "if", "p", "[", "1", "]", "==", "Symbols", ".", "TRUE", ".", "value", ":", "p", "[", "0", "]", "=", "LTLfTrue", "(", ")", "elif", "p", "[", "...
formula : formula EQUIVALENCE formula | formula IMPLIES formula | formula OR formula | formula AND formula | formula UNTIL formula | formula RELEASE formula | EVENTUALLY formula | ALWAYS formula | NEXT formula | WEAK_NEXT formula | NOT formula | TRUE | FALSE | ATOM
[ "formula", ":", "formula", "EQUIVALENCE", "formula", "|", "formula", "IMPLIES", "formula", "|", "formula", "OR", "formula", "|", "formula", "AND", "formula", "|", "formula", "UNTIL", "formula", "|", "formula", "RELEASE", "formula", "|", "EVENTUALLY", "formula", ...
python
train
pantsbuild/pants
src/python/pants/backend/jvm/tasks/ivy_resolve.py
https://github.com/pantsbuild/pants/blob/b72e650da0df685824ffdcc71988b8c282d0962d/src/python/pants/backend/jvm/tasks/ivy_resolve.py#L88-L117
def execute(self): """Resolves the specified confs for the configured targets and returns an iterator over tuples of (conf, jar path). """ if JvmResolveSubsystem.global_instance().get_options().resolver != 'ivy': return compile_classpath = self.context.products.get_data('compile_classpath', init_func=ClasspathProducts.init_func(self.get_options().pants_workdir)) targets = self.context.targets() if all(not isinstance(target, JarLibrary) for target in targets): if self._report: self.context.log.info("Not generating a report. No resolution performed.") return executor = self.create_java_executor() results = self.resolve(executor=executor, targets=targets, classpath_products=compile_classpath, confs=self.get_options().confs, extra_args=self._args) if self._report: results_with_resolved_artifacts = [r for r in results if r.has_resolved_artifacts] if not results_with_resolved_artifacts: self.context.log.info("Not generating a report. No resolution performed.") else: for result in results_with_resolved_artifacts: self._generate_ivy_report(result)
[ "def", "execute", "(", "self", ")", ":", "if", "JvmResolveSubsystem", ".", "global_instance", "(", ")", ".", "get_options", "(", ")", ".", "resolver", "!=", "'ivy'", ":", "return", "compile_classpath", "=", "self", ".", "context", ".", "products", ".", "ge...
Resolves the specified confs for the configured targets and returns an iterator over tuples of (conf, jar path).
[ "Resolves", "the", "specified", "confs", "for", "the", "configured", "targets", "and", "returns", "an", "iterator", "over", "tuples", "of", "(", "conf", "jar", "path", ")", "." ]
python
train
lingpy/sinopy
src/sinopy/sinopy.py
https://github.com/lingpy/sinopy/blob/59a47fcdfae3e0000ac6d2b3d7919bf875ec2056/src/sinopy/sinopy.py#L276-L326
def fanqie2mch(fanqie, debug=False): """ Convert a Fǎnqiè reading to it's MCH counterpart. Important: we need to identify the medials in the xia-syllable. We also need to make additional background-checks, since in the current form, the approach is not error-prone and does not what it is supposed to do! """ # check for gbk fanqie = gbk2big5(fanqie) # get normal vals shangxia = chars2baxter(fanqie) # check for good fixed solutions in our dictionary if fanqie[0] in _cd.GY['sheng']: shang = _cd.GY['sheng'][fanqie[0]]+'a' else: shang = shangxia[0] xia = shangxia[1] # check for bad vals if ' ' in shang: shang = shang[:shang.index(' ')] if ' ' in xia: xia = xia[:xia.index(' ')] if ',' in shang or ',' in xia or not shang or not xia: raise ValueError('fanqie {0} {1} is ambiguous'.format(shang,xia)) # parse chars shangp = parse_baxter(shang) xiap = parse_baxter(xia) if debug: return '.'.join(shangp),'.'.join(xiap),shang,xia i = shangp[0] m = xiap[1] f = xiap[2] t = xiap[3].replace('P','').replace('R','') # ugly, change later XXX # clean medial-relations if ('y' in i or 'i' in f) and 'j' in m: m = m.replace('j','') if 'u' in f and 'w' in m: m = m.replace('w','') return ''.join([i,m,f,t])
[ "def", "fanqie2mch", "(", "fanqie", ",", "debug", "=", "False", ")", ":", "# check for gbk", "fanqie", "=", "gbk2big5", "(", "fanqie", ")", "# get normal vals", "shangxia", "=", "chars2baxter", "(", "fanqie", ")", "# check for good fixed solutions in our dictionary", ...
Convert a Fǎnqiè reading to it's MCH counterpart. Important: we need to identify the medials in the xia-syllable. We also need to make additional background-checks, since in the current form, the approach is not error-prone and does not what it is supposed to do!
[ "Convert", "a", "Fǎnqiè", "reading", "to", "it", "s", "MCH", "counterpart", ".", "Important", ":", "we", "need", "to", "identify", "the", "medials", "in", "the", "xia", "-", "syllable", ".", "We", "also", "need", "to", "make", "additional", "background", ...
python
train
yamins81/tabular
tabular/io.py
https://github.com/yamins81/tabular/blob/1caf091c8c395960a9ad7078f95158b533cc52dd/tabular/io.py#L1177-L1354
def processmetadata(metadata, items=None, comments=None, delimiter_regex=None, ncols=None, verbosity=DEFAULT_VERBOSITY): """ Process Metadata from stored (or "packed") state to functional state. Metadata can come be read from a file "packed" in various ways, e.g. with a string representation of a dialect or coloring dictionary. This function "unpacks" the stored metadata into useable Python objects. It consists of a list of quasi-modular parts, one for each type of recognized metadata. **Parameters** **metadata** : dictionary This argument is a dictionary whose keys are strings denoting different kinds of metadata (e.g. "names" or "formats") and whose values are the metadata of that type. The metadata dictionary is modified IN-PLACE by this function. **items** : string or list of strings, optional The items arguments specifies which metadata keys are to be processed. E.g. of items = 'names,formats', then the "names" metadata and "formats" metadata will be processed, but no others. Note however, that sometimes, the processing of one type of metadata requires that another be processed first, e.g. "dialect" must processed into an actual CSV.dialect object before "names" is processed. (The processed of "names" metadata involves splitting the names metadata string into a list, using the delimiter. This delimiter is part of the dialect object.) In these cases, if you call processmetadata on one item before its requirements are processed, nothing will happen. **comments** : single-character string, optional The comments character is used to process many pieces of metadata, e.g. it is striped of the left side of names and formats strings before splitting on delimiter. **verbosity** : integer, optional Determines the level of verbosity in the printout of messages during the running of the procedure. **Returns** Nothing. 
""" items = items.split(',') if isinstance(items,str) else items if comments is None: if 'comments' in metadata.keys(): comments = metadata['comments'] else: comments = '#' if verbosity > 8: print 'processing metadata with comments char = #' else: if (('comments' in metadata.keys()) and (comments != metadata['comments']) and (verbosity > 8)): print 'comments character specified to process metadata (', repr(comments) ,') is different from comments charcater set in metadata dictionary (', repr(metadata['comments']) , ').' if not items: for k in metadata.keys(): if is_string_like(metadata[k]): metadata[k] = '\n'.join([x.lstrip(comments) for x in metadata[k].split('\n') ]) if not items or 'dialect' in items: if 'dialect' in metadata.keys(): if isinstance(metadata['dialect'],str): D = dialectfromstring(metadata['dialect'].lstrip(comments)) if D: metadata['dialect'] = D if (verbosity > 8): print 'processed dialect from string' else: if (verbosity > 8): print '''Dialect failed to be converted properly from string representation in metadata.''' if 'delimiter' in dir(metadata['dialect']): for a in dir(metadata['dialect']): if not a.startswith('_') and a in metadata.keys(): setattr(metadata['dialect'],a, metadata[a]) if ((verbosity > 2 and a == 'delimiter') or (verbosity >= 8)): print 'Setting dialect attribute', a, 'to equal specified value:', repr(metadata[a]) elif not a.startswith('_') and a not in metadata.keys(): metadata[a] = getattr(metadata['dialect'], a) if ((verbosity > 2 and a == 'delimiter') or (verbosity >= 8)): print 'Setting metadata attribute from dialect', a , 'to equal specified value:', repr(metadata[a]) if (not items or 'names' in items) and ('names' in metadata.keys()): if is_string_like(metadata['names']): if delimiter_regex: metadata['names'] = delimiter_regex.split(metadata['names']) elif (('dialect' in metadata.keys()) and ('delimiter' in dir(metadata['dialect']))): d = metadata['dialect'] n = metadata['names'] metadata['names'] = 
list(csv.reader([n.lstrip(comments)], dialect=d))[0] if (verbosity > 8): print '... splitting "names" metadata from string with delimiter', repr(d.delimiter), '. Resulting names:', metadata['names'] if (not items or 'formats' in items) and 'formats' in metadata.keys(): if is_string_like(metadata['formats']): if delimiter_regex: metadata['formats'] = delimiter_regex.split(metadata['formats']) elif (('dialect' in metadata.keys()) and ('delimiter' in dir(metadata['dialect']))): d = metadata['dialect'] n = metadata['formats'] metadata['formats'] = list(csv.reader([n.lstrip(comments)], dialect=d))[0] if (verbosity > 8): print '... splitting "formats" metadata from string with delimiter', repr(d.delimiter), '. Resulting names:', metadata['formats'] if ncols: metadata['formats'] = postprocessformats(metadata['formats'],ncols) if (not items or 'types' in items) and 'types' in metadata.keys(): if is_string_like(metadata['types']): if delimiter_regex: metadata['types'] = delimiter_regex.split(metadata['types']) elif (('dialect' in metadata.keys()) and ('delimiter' in dir(metadata['dialect']))): d = metadata['dialect'] n = metadata['types'] metadata['types'] = list(csv.reader([n.lstrip(comments)], dialect=d))[0] if (verbosity > 8): print '... splitting "types" metadata from string with delimiter', repr(d.delimiter), '. Resulting names:', metadata['types'] if ncols: metadata['types'] = postprocessformats(metadata['types'],ncols) if (not items or 'coloring' in items) and ('coloring' in metadata.keys()): if is_string_like(metadata['coloring']): C = coloringfromstring(metadata['coloring'].lstrip(comments)) if C: metadata['coloring'] = C if (verbosity > 8): print '... processed coloring from string' else: if verbosity > 1: print 'Coloring failed to be converted properly from string representation in metadata ; removing coloring data from active metadata (putting it in "loaded_coloring").' 
metadata['loaded_coloring'] = metadata['coloring'] metadata.pop('coloring') if (not items or 'headerlines' in items): if 'headerlines' in metadata.keys(): if isinstance(metadata['headerlines'], str): try: h = metadata['headerlines'] metadata['headerlines'] = int(h.lstrip(comments)) except (ValueError,TypeError): if verbosity > 6: print 'headerlines metadata failed to convert to an integer.' else: pass if isinstance(metadata['headerlines'], int): if 'metametadata' in metadata.keys(): h= metadata['headerlines'] mmd = metadata['metametadata'] metadata['headerlines'] = max(h, 1 + max([v if isinstance(v, int) else max(v) for v in mmd.values()])) if ((metadata['headerlines'] > h) and (verbosity > 8)): print 'Resetting headerlines from', h, 'to', metadata['headerlines'], 'because of line number indications from metametadata.'
[ "def", "processmetadata", "(", "metadata", ",", "items", "=", "None", ",", "comments", "=", "None", ",", "delimiter_regex", "=", "None", ",", "ncols", "=", "None", ",", "verbosity", "=", "DEFAULT_VERBOSITY", ")", ":", "items", "=", "items", ".", "split", ...
Process Metadata from stored (or "packed") state to functional state. Metadata can come be read from a file "packed" in various ways, e.g. with a string representation of a dialect or coloring dictionary. This function "unpacks" the stored metadata into useable Python objects. It consists of a list of quasi-modular parts, one for each type of recognized metadata. **Parameters** **metadata** : dictionary This argument is a dictionary whose keys are strings denoting different kinds of metadata (e.g. "names" or "formats") and whose values are the metadata of that type. The metadata dictionary is modified IN-PLACE by this function. **items** : string or list of strings, optional The items arguments specifies which metadata keys are to be processed. E.g. of items = 'names,formats', then the "names" metadata and "formats" metadata will be processed, but no others. Note however, that sometimes, the processing of one type of metadata requires that another be processed first, e.g. "dialect" must processed into an actual CSV.dialect object before "names" is processed. (The processed of "names" metadata involves splitting the names metadata string into a list, using the delimiter. This delimiter is part of the dialect object.) In these cases, if you call processmetadata on one item before its requirements are processed, nothing will happen. **comments** : single-character string, optional The comments character is used to process many pieces of metadata, e.g. it is striped of the left side of names and formats strings before splitting on delimiter. **verbosity** : integer, optional Determines the level of verbosity in the printout of messages during the running of the procedure. **Returns** Nothing.
[ "Process", "Metadata", "from", "stored", "(", "or", "packed", ")", "state", "to", "functional", "state", "." ]
python
train
jsommers/switchyard
switchyard/lib/packet/udp.py
https://github.com/jsommers/switchyard/blob/fdcb3869c937dcedbd6ea7a7822ebd412bf1e2b0/switchyard/lib/packet/udp.py#L30-L35
def to_bytes(self): ''' Return packed byte representation of the UDP header. ''' return struct.pack(UDP._PACKFMT, self._src, self._dst, self._len, self._checksum)
[ "def", "to_bytes", "(", "self", ")", ":", "return", "struct", ".", "pack", "(", "UDP", ".", "_PACKFMT", ",", "self", ".", "_src", ",", "self", ".", "_dst", ",", "self", ".", "_len", ",", "self", ".", "_checksum", ")" ]
Return packed byte representation of the UDP header.
[ "Return", "packed", "byte", "representation", "of", "the", "UDP", "header", "." ]
python
train
kwikteam/phy
phy/cluster/clustering.py
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L510-L537
def redo(self): """Redo the last cluster assignment operation. Returns ------- up : UpdateInfo instance of the changes done by this operation. """ # Go forward in the stack, and retrieve the new assignment. item = self._undo_stack.forward() if item is None: # No redo has been performed: abort. return # NOTE: the undo_state object is only returned when undoing. # It represents data associated to the state # *before* the action. What might be more useful would be the # undo_state object of the next item in the list (if it exists). spike_ids, cluster_ids, undo_state = item assert spike_ids is not None # We apply the new assignment. up = self._do_assign(spike_ids, cluster_ids) up.history = 'redo' self.emit('cluster', up) return up
[ "def", "redo", "(", "self", ")", ":", "# Go forward in the stack, and retrieve the new assignment.", "item", "=", "self", ".", "_undo_stack", ".", "forward", "(", ")", "if", "item", "is", "None", ":", "# No redo has been performed: abort.", "return", "# NOTE: the undo_s...
Redo the last cluster assignment operation. Returns ------- up : UpdateInfo instance of the changes done by this operation.
[ "Redo", "the", "last", "cluster", "assignment", "operation", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/layers/common_layers.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/layers/common_layers.py#L2238-L2298
def sru_with_scan(x, num_layers=2, activation=None, initial_state=None, name=None, reuse=None): """SRU cell as in https://arxiv.org/abs/1709.02755. This implementation uses tf.scan and can incur overhead, see the full SRU function doc for details and an implementation that is sometimes faster. Args: x: A tensor of shape [batch, ..., channels] ; ... is treated as time. num_layers: How many SRU layers; default is 2 as results for 1 disappoint. activation: Optional activation function, try tf.nn.tanh or tf.nn.relu. initial_state: Optional initial c-state, set to zeros if None. name: Optional name, "sru" by default. reuse: Optional reuse. Returns: A tensor of the same shape as x. Raises: ValueError: if num_layers is not positive. """ if num_layers < 1: raise ValueError("Number of layers must be positive: %d" % num_layers) with tf.variable_scope(name, default_name="sru", values=[x], reuse=reuse): # We assume x is [batch, ..., channels] and treat all ... as time. x_shape = shape_list(x) x = tf.reshape(x, [x_shape[0], -1, x_shape[-1]]) x = tf.transpose(x, [1, 0, 2]) # Scan assumes time on axis 0. initial_state = initial_state or tf.zeros([x_shape[0], x_shape[-1]]) # SRU state manipulation function. def next_state(cur_state, args_tup): cur_x_times_one_minus_f, cur_f = args_tup return cur_f * cur_state + cur_x_times_one_minus_f # Calculate SRU on each layer. for i in range(num_layers): # The parallel part of the SRU. x_orig = x x, f, r = tf.split( layers().Dense(3 * x_shape[-1], name="kernel_%d" % i)(x), 3, axis=-1) f, r = tf.sigmoid(f), tf.sigmoid(r) x_times_one_minus_f = x * (1.0 - f) # Compute in parallel for speed. # Calculate states. c_states = tf.scan( next_state, (x_times_one_minus_f, f), initializer=initial_state, parallel_iterations=2, name="scan_%d" % i) # Final output. if activation is not None: c_states = activation(c_states) h = c_states * r + (1.0 - r) * x_orig x = h # Next layer. # Transpose back to batch-major. 
x = tf.transpose(x, [1, 0, 2]) return tf.reshape(x, x_shape)
[ "def", "sru_with_scan", "(", "x", ",", "num_layers", "=", "2", ",", "activation", "=", "None", ",", "initial_state", "=", "None", ",", "name", "=", "None", ",", "reuse", "=", "None", ")", ":", "if", "num_layers", "<", "1", ":", "raise", "ValueError", ...
SRU cell as in https://arxiv.org/abs/1709.02755. This implementation uses tf.scan and can incur overhead, see the full SRU function doc for details and an implementation that is sometimes faster. Args: x: A tensor of shape [batch, ..., channels] ; ... is treated as time. num_layers: How many SRU layers; default is 2 as results for 1 disappoint. activation: Optional activation function, try tf.nn.tanh or tf.nn.relu. initial_state: Optional initial c-state, set to zeros if None. name: Optional name, "sru" by default. reuse: Optional reuse. Returns: A tensor of the same shape as x. Raises: ValueError: if num_layers is not positive.
[ "SRU", "cell", "as", "in", "https", ":", "//", "arxiv", ".", "org", "/", "abs", "/", "1709", ".", "02755", "." ]
python
train
google/grr
grr/core/grr_response_core/lib/utils.py
https://github.com/google/grr/blob/5cef4e8e2f0d5df43ea4877e9c798e0bf60bfe74/grr/core/grr_response_core/lib/utils.py#L629-L649
def JoinPath(stem="", *parts): """A sane version of os.path.join. The intention here is to append the stem to the path. The standard module removes the path if the stem begins with a /. Args: stem: The stem to join to. *parts: parts of the path to join. The first arg is always the root and directory traversal is not allowed. Returns: a normalized path. """ # Ensure all path components are unicode parts = [SmartUnicode(path) for path in parts] result = (stem + NormalizePath(u"/".join(parts))).replace("//", "/") result = result.rstrip("/") return result or "/"
[ "def", "JoinPath", "(", "stem", "=", "\"\"", ",", "*", "parts", ")", ":", "# Ensure all path components are unicode", "parts", "=", "[", "SmartUnicode", "(", "path", ")", "for", "path", "in", "parts", "]", "result", "=", "(", "stem", "+", "NormalizePath", ...
A sane version of os.path.join. The intention here is to append the stem to the path. The standard module removes the path if the stem begins with a /. Args: stem: The stem to join to. *parts: parts of the path to join. The first arg is always the root and directory traversal is not allowed. Returns: a normalized path.
[ "A", "sane", "version", "of", "os", ".", "path", ".", "join", "." ]
python
train
Azure/msrestazure-for-python
msrestazure/polling/arm_polling.py
https://github.com/Azure/msrestazure-for-python/blob/5f99262305692525d03ca87d2c5356b05c5aa874/msrestazure/polling/arm_polling.py#L87-L100
def get_header_url(response, header_name): """Get a URL from a header requests. :param requests.Response response: REST call response. :param str header_name: Header name. :returns: URL if not None AND valid, None otherwise """ url = response.headers.get(header_name) try: _validate(url) except ValueError: return None else: return url
[ "def", "get_header_url", "(", "response", ",", "header_name", ")", ":", "url", "=", "response", ".", "headers", ".", "get", "(", "header_name", ")", "try", ":", "_validate", "(", "url", ")", "except", "ValueError", ":", "return", "None", "else", ":", "re...
Get a URL from a header requests. :param requests.Response response: REST call response. :param str header_name: Header name. :returns: URL if not None AND valid, None otherwise
[ "Get", "a", "URL", "from", "a", "header", "requests", "." ]
python
train
phrase/django-phrase
phrase/templatetags/phrase_i18n.py
https://github.com/phrase/django-phrase/blob/10dbd53513edd30da3fd6c020bcd7f0a1b7338b9/phrase/templatetags/phrase_i18n.py#L21-L98
def do_translate(parser, token): """ This will mark a string for translation and will translate the string for the current language. Usage:: {% trans "this is a test" %} This will mark the string for translation so it will be pulled out by mark-messages.py into the .po files and will run the string through the translation engine. There is a second form:: {% trans "this is a test" noop %} This will only mark for translation, but will return the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% trans variable %} This will just try to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. It is possible to store the translated string into a variable:: {% trans "this is a test" as var %} {{ var }} Contextual translations are also supported:: {% trans "this is a test" context "greeting" %} This is equivalent to calling pgettext instead of (u)gettext. """ bits = token.split_contents() if len(bits) < 2: raise TemplateSyntaxError("'%s' takes at least one argument" % bits[0]) message_string = parser.compile_filter(bits[1]) remaining = bits[2:] noop = False asvar = None message_context = None seen = set() invalid_context = {'as', 'noop'} while remaining: option = remaining.pop(0) if option in seen: raise TemplateSyntaxError( "The '%s' option was specified more than once." % option, ) elif option == 'noop': noop = True elif option == 'context': try: value = remaining.pop(0) except IndexError: msg = "No argument provided to the '%s' tag for the context option." 
% bits[0] six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2]) if value in invalid_context: raise TemplateSyntaxError( "Invalid argument '%s' provided to the '%s' tag for the context option" % (value, bits[0]), ) message_context = parser.compile_filter(value) elif option == 'as': try: value = remaining.pop(0) except IndexError: msg = "No argument provided to the '%s' tag for the as option." % bits[0] six.reraise(TemplateSyntaxError, TemplateSyntaxError(msg), sys.exc_info()[2]) asvar = value else: raise TemplateSyntaxError( "Unknown argument for '%s' tag: '%s'. The only options " "available are 'noop', 'context' \"xxx\", and 'as VAR'." % ( bits[0], option, ) ) seen.add(option) if phrase_settings.PHRASE_ENABLED: return PhraseTranslateNode(message_string, noop, asvar, message_context) else: return TranslateNode(message_string, noop, asvar, message_context)
[ "def", "do_translate", "(", "parser", ",", "token", ")", ":", "bits", "=", "token", ".", "split_contents", "(", ")", "if", "len", "(", "bits", ")", "<", "2", ":", "raise", "TemplateSyntaxError", "(", "\"'%s' takes at least one argument\"", "%", "bits", "[", ...
This will mark a string for translation and will translate the string for the current language. Usage:: {% trans "this is a test" %} This will mark the string for translation so it will be pulled out by mark-messages.py into the .po files and will run the string through the translation engine. There is a second form:: {% trans "this is a test" noop %} This will only mark for translation, but will return the string unchanged. Use it when you need to store values into forms that should be translated later on. You can use variables instead of constant strings to translate stuff you marked somewhere else:: {% trans variable %} This will just try to translate the contents of the variable ``variable``. Make sure that the string in there is something that is in the .po file. It is possible to store the translated string into a variable:: {% trans "this is a test" as var %} {{ var }} Contextual translations are also supported:: {% trans "this is a test" context "greeting" %} This is equivalent to calling pgettext instead of (u)gettext.
[ "This", "will", "mark", "a", "string", "for", "translation", "and", "will", "translate", "the", "string", "for", "the", "current", "language", ".", "Usage", "::", "{", "%", "trans", "this", "is", "a", "test", "%", "}", "This", "will", "mark", "the", "s...
python
train
wal-e/wal-e
wal_e/operator/backup.py
https://github.com/wal-e/wal-e/blob/027263860e72a403bc0e1497bb3e67523138e7a2/wal_e/operator/backup.py#L293-L363
def wal_restore(self, wal_name, wal_destination, prefetch_max): """ Downloads a WAL file from S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's restore_command feature. NB: Postgres doesn't guarantee that wal_name == basename(wal_path), so both are required. """ url = '{0}://{1}/{2}'.format( self.layout.scheme, self.layout.store_name(), self.layout.wal_path(wal_name)) if prefetch_max > 0: # Check for prefetch-hit. base = os.path.dirname(os.path.realpath(wal_destination)) pd = prefetch.Dirs(base) seg = WalSegment(wal_name) started = start_prefetches(seg, pd, prefetch_max) last_size = 0 while True: if pd.contains(seg): pd.promote(seg, wal_destination) logger.info( msg='promoted prefetched wal segment', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix}) pd.clear_except(started) return True # If there is a 'running' download, wait a bit for it # to make progress or finish. However, if it doesn't # make progress in some amount of time, assume that # the prefetch process has died and go on with the # in-band downloading code. sz = pd.running_size(seg) if sz <= last_size: break last_size = sz gevent.sleep(0.5) pd.clear_except(started) logger.info( msg='begin wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'begin'}) ret = do_lzop_get(self.creds, url, wal_destination, self.gpg_key_id is not None) logger.info( msg='complete wal restore', structured={'action': 'wal-fetch', 'key': url, 'seg': wal_name, 'prefix': self.layout.path_prefix, 'state': 'complete'}) return ret
[ "def", "wal_restore", "(", "self", ",", "wal_name", ",", "wal_destination", ",", "prefetch_max", ")", ":", "url", "=", "'{0}://{1}/{2}'", ".", "format", "(", "self", ".", "layout", ".", "scheme", ",", "self", ".", "layout", ".", "store_name", "(", ")", "...
Downloads a WAL file from S3 or Windows Azure Blob Service This code is intended to typically be called from Postgres's restore_command feature. NB: Postgres doesn't guarantee that wal_name == basename(wal_path), so both are required.
[ "Downloads", "a", "WAL", "file", "from", "S3", "or", "Windows", "Azure", "Blob", "Service" ]
python
train
sorgerlab/indra
indra/tools/incremental_model.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/tools/incremental_model.py#L149-L163
def get_model_agents(self): """Return a list of all Agents from all Statements. Returns ------- agents : list[indra.statements.Agent] A list of Agents that are in the model. """ model_stmts = self.get_statements() agents = [] for stmt in model_stmts: for a in stmt.agent_list(): if a is not None: agents.append(a) return agents
[ "def", "get_model_agents", "(", "self", ")", ":", "model_stmts", "=", "self", ".", "get_statements", "(", ")", "agents", "=", "[", "]", "for", "stmt", "in", "model_stmts", ":", "for", "a", "in", "stmt", ".", "agent_list", "(", ")", ":", "if", "a", "i...
Return a list of all Agents from all Statements. Returns ------- agents : list[indra.statements.Agent] A list of Agents that are in the model.
[ "Return", "a", "list", "of", "all", "Agents", "from", "all", "Statements", "." ]
python
train
gabstopper/smc-python
smc/elements/network.py
https://github.com/gabstopper/smc-python/blob/e027b8a5dcfaf884eada32d113d41c1e56b32457/smc/elements/network.py#L283-L305
def create(cls, name, ne_ref=None, operator='exclusion', sub_expression=None, comment=None): """ Create the expression :param str name: name of expression :param list ne_ref: network element references for expression :param str operator: 'exclusion' (negation), 'union', 'intersection' (default: exclusion) :param dict sub_expression: sub expression used :param str comment: optional comment :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Expression """ sub_expression = [] if sub_expression is None else [sub_expression] json = {'name': name, 'operator': operator, 'ne_ref': ne_ref, 'sub_expression': sub_expression, 'comment': comment} return ElementCreator(cls, json)
[ "def", "create", "(", "cls", ",", "name", ",", "ne_ref", "=", "None", ",", "operator", "=", "'exclusion'", ",", "sub_expression", "=", "None", ",", "comment", "=", "None", ")", ":", "sub_expression", "=", "[", "]", "if", "sub_expression", "is", "None", ...
Create the expression :param str name: name of expression :param list ne_ref: network element references for expression :param str operator: 'exclusion' (negation), 'union', 'intersection' (default: exclusion) :param dict sub_expression: sub expression used :param str comment: optional comment :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Expression
[ "Create", "the", "expression" ]
python
train
fastai/fastai
fastai/basic_train.py
https://github.com/fastai/fastai/blob/9fb84a5cdefe5a766cdb792b8f5d8971737b7e67/fastai/basic_train.py#L20-L38
def loss_batch(model:nn.Module, xb:Tensor, yb:Tensor, loss_func:OptLossFunc=None, opt:OptOptimizer=None, cb_handler:Optional[CallbackHandler]=None)->Tuple[Union[Tensor,int,float,str]]: "Calculate loss and metrics for a batch, call out to callbacks as necessary." cb_handler = ifnone(cb_handler, CallbackHandler()) if not is_listy(xb): xb = [xb] if not is_listy(yb): yb = [yb] out = model(*xb) out = cb_handler.on_loss_begin(out) if not loss_func: return to_detach(out), yb[0].detach() loss = loss_func(out, *yb) if opt is not None: loss,skip_bwd = cb_handler.on_backward_begin(loss) if not skip_bwd: loss.backward() if not cb_handler.on_backward_end(): opt.step() if not cb_handler.on_step_end(): opt.zero_grad() return loss.detach().cpu()
[ "def", "loss_batch", "(", "model", ":", "nn", ".", "Module", ",", "xb", ":", "Tensor", ",", "yb", ":", "Tensor", ",", "loss_func", ":", "OptLossFunc", "=", "None", ",", "opt", ":", "OptOptimizer", "=", "None", ",", "cb_handler", ":", "Optional", "[", ...
Calculate loss and metrics for a batch, call out to callbacks as necessary.
[ "Calculate", "loss", "and", "metrics", "for", "a", "batch", "call", "out", "to", "callbacks", "as", "necessary", "." ]
python
train
QInfer/python-qinfer
src/qinfer/distributions.py
https://github.com/QInfer/python-qinfer/blob/8170c84a0be1723f8c6b09e0d3c7a40a886f1fe3/src/qinfer/distributions.py#L1321-L1347
def sample(self, n=1): """ Returns one or more samples from this probability distribution. :param int n: Number of samples to return. :return numpy.ndarray: An array containing samples from the distribution of shape ``(n, d)``, where ``d`` is the number of random variables. """ samples = np.empty((n, self.n_rvs)) idxs_to_sample = np.arange(n) iters = 0 while idxs_to_sample.size and iters < self._maxiters: samples[idxs_to_sample] = self._dist.sample(len(idxs_to_sample)) idxs_to_sample = idxs_to_sample[np.nonzero(np.logical_not( self._model.are_models_valid(samples[idxs_to_sample, :]) ))[0]] iters += 1 if idxs_to_sample.size: raise RuntimeError("Did not successfully postselect within {} iterations.".format(self._maxiters)) return samples
[ "def", "sample", "(", "self", ",", "n", "=", "1", ")", ":", "samples", "=", "np", ".", "empty", "(", "(", "n", ",", "self", ".", "n_rvs", ")", ")", "idxs_to_sample", "=", "np", ".", "arange", "(", "n", ")", "iters", "=", "0", "while", "idxs_to_...
Returns one or more samples from this probability distribution. :param int n: Number of samples to return. :return numpy.ndarray: An array containing samples from the distribution of shape ``(n, d)``, where ``d`` is the number of random variables.
[ "Returns", "one", "or", "more", "samples", "from", "this", "probability", "distribution", "." ]
python
train
pyQode/pyqode.core
pyqode/core/panels/search_and_replace.py
https://github.com/pyQode/pyqode.core/blob/a99ec6cd22d519394f613309412f8329dc4e90cb/pyqode/core/panels/search_and_replace.py#L443-L487
def replace(self, text=None): """ Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replace properly, False if there is no more occurrences to replace. """ if text is None or isinstance(text, bool): text = self.lineEditReplace.text() current_occurences = self._current_occurrence() occurrences = self.get_occurences() if current_occurences == -1: self.select_next() current_occurences = self._current_occurrence() try: # prevent search request due to editor textChanged try: self.editor.textChanged.disconnect(self.request_search) except (RuntimeError, TypeError): # already disconnected pass occ = occurrences[current_occurences] cursor = self.editor.textCursor() cursor.setPosition(occ[0]) cursor.setPosition(occ[1], cursor.KeepAnchor) len_to_replace = len(cursor.selectedText()) len_replacement = len(text) offset = len_replacement - len_to_replace cursor.insertText(text) self.editor.setTextCursor(cursor) self._remove_occurrence(current_occurences, offset) current_occurences -= 1 self._set_current_occurrence(current_occurences) self.select_next() self.cpt_occurences = len(self.get_occurences()) self._update_label_matches() self._update_buttons() return True except IndexError: return False finally: self.editor.textChanged.connect(self.request_search)
[ "def", "replace", "(", "self", ",", "text", "=", "None", ")", ":", "if", "text", "is", "None", "or", "isinstance", "(", "text", ",", "bool", ")", ":", "text", "=", "self", ".", "lineEditReplace", ".", "text", "(", ")", "current_occurences", "=", "sel...
Replaces the selected occurrence. :param text: The replacement text. If it is None, the lineEditReplace's text is used instead. :return True if the text could be replace properly, False if there is no more occurrences to replace.
[ "Replaces", "the", "selected", "occurrence", "." ]
python
train
10gen/mongo-orchestration
mongo_orchestration/apps/links.py
https://github.com/10gen/mongo-orchestration/blob/81fd2224205922ea2178b08190b53a33aec47261/mongo_orchestration/apps/links.py#L181-L193
def all_sharded_cluster_links(cluster_id, shard_id=None, router_id=None, rel_to=None): """Get a list of all links to be included with ShardedClusters.""" return [ sharded_cluster_link(rel, cluster_id, shard_id, router_id, self_rel=(rel == rel_to)) for rel in ( 'get-sharded-clusters', 'get-sharded-cluster-info', 'sharded-cluster-command', 'delete-sharded-cluster', 'add-shard', 'get-shards', 'get-configsvrs', 'get-routers', 'add-router' ) ]
[ "def", "all_sharded_cluster_links", "(", "cluster_id", ",", "shard_id", "=", "None", ",", "router_id", "=", "None", ",", "rel_to", "=", "None", ")", ":", "return", "[", "sharded_cluster_link", "(", "rel", ",", "cluster_id", ",", "shard_id", ",", "router_id", ...
Get a list of all links to be included with ShardedClusters.
[ "Get", "a", "list", "of", "all", "links", "to", "be", "included", "with", "ShardedClusters", "." ]
python
train
KrzyHonk/bpmn-python
bpmn_python/bpmn_diagram_import.py
https://github.com/KrzyHonk/bpmn-python/blob/6e5e28e3d656dbf5bd3d85d78fe8e3f2fb462629/bpmn_python/bpmn_diagram_import.py#L490-L505
def import_complex_gateway_to_graph(diagram_graph, process_id, process_attributes, element): """ Adds to graph the new element that represents BPMN complex gateway. In addition to attributes inherited from Gateway type, complex gateway has additional attribute default flow (default value - none). :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML 'complexGateway' element. """ element_id = element.getAttribute(consts.Consts.id) BpmnDiagramGraphImport.import_gateway_to_graph(diagram_graph, process_id, process_attributes, element) diagram_graph.node[element_id][consts.Consts.default] = element.getAttribute(consts.Consts.default) \ if element.hasAttribute(consts.Consts.default) else None
[ "def", "import_complex_gateway_to_graph", "(", "diagram_graph", ",", "process_id", ",", "process_attributes", ",", "element", ")", ":", "element_id", "=", "element", ".", "getAttribute", "(", "consts", ".", "Consts", ".", "id", ")", "BpmnDiagramGraphImport", ".", ...
Adds to graph the new element that represents BPMN complex gateway. In addition to attributes inherited from Gateway type, complex gateway has additional attribute default flow (default value - none). :param diagram_graph: NetworkX graph representing a BPMN process diagram, :param process_id: string object, representing an ID of process element, :param process_attributes: dictionary that holds attribute values of 'process' element, which is parent of imported flow node, :param element: object representing a BPMN XML 'complexGateway' element.
[ "Adds", "to", "graph", "the", "new", "element", "that", "represents", "BPMN", "complex", "gateway", ".", "In", "addition", "to", "attributes", "inherited", "from", "Gateway", "type", "complex", "gateway", "has", "additional", "attribute", "default", "flow", "(",...
python
train
datamachine/twx
twx/twx.py
https://github.com/datamachine/twx/blob/d9633f12f3647b1e54ba87b70b39df3b7e02b4eb/twx/twx.py#L717-L725
def send_chat_action(self, peer: Peer, action: botapi.ChatAction, on_success: callable=None): """ Send status to peer. :param peer: Peer to send status to. :param action: Type of action to send to peer. :param on_success: Callback to call when call is complete. """ botapi.send_chat_action(chat_id=peer.id, action=action, on_success=on_success, **self.request_args).run()
[ "def", "send_chat_action", "(", "self", ",", "peer", ":", "Peer", ",", "action", ":", "botapi", ".", "ChatAction", ",", "on_success", ":", "callable", "=", "None", ")", ":", "botapi", ".", "send_chat_action", "(", "chat_id", "=", "peer", ".", "id", ",", ...
Send status to peer. :param peer: Peer to send status to. :param action: Type of action to send to peer. :param on_success: Callback to call when call is complete.
[ "Send", "status", "to", "peer", ".", ":", "param", "peer", ":", "Peer", "to", "send", "status", "to", ".", ":", "param", "action", ":", "Type", "of", "action", "to", "send", "to", "peer", ".", ":", "param", "on_success", ":", "Callback", "to", "call"...
python
train
Metatab/geoid
geoid/civick.py
https://github.com/Metatab/geoid/blob/4b7769406b00e59376fb6046b42a2f8ed706b33b/geoid/civick.py#L41-L46
def summarize(self): """Convert all of the values to their max values. This form is used to represent the summary level""" s = str(self.allval()) return self.parse(s[:2]+ ''.join(['Z']*len(s[2:])))
[ "def", "summarize", "(", "self", ")", ":", "s", "=", "str", "(", "self", ".", "allval", "(", ")", ")", "return", "self", ".", "parse", "(", "s", "[", ":", "2", "]", "+", "''", ".", "join", "(", "[", "'Z'", "]", "*", "len", "(", "s", "[", ...
Convert all of the values to their max values. This form is used to represent the summary level
[ "Convert", "all", "of", "the", "values", "to", "their", "max", "values", ".", "This", "form", "is", "used", "to", "represent", "the", "summary", "level" ]
python
train
tmux-python/libtmux
libtmux/session.py
https://github.com/tmux-python/libtmux/blob/8eb2f8bbea3a025c1567b1516653414dbc24e1fc/libtmux/session.py#L114-L120
def kill_session(self): """``$ tmux kill-session``.""" proc = self.cmd('kill-session', '-t%s' % self.id) if proc.stderr: raise exc.LibTmuxException(proc.stderr)
[ "def", "kill_session", "(", "self", ")", ":", "proc", "=", "self", ".", "cmd", "(", "'kill-session'", ",", "'-t%s'", "%", "self", ".", "id", ")", "if", "proc", ".", "stderr", ":", "raise", "exc", ".", "LibTmuxException", "(", "proc", ".", "stderr", "...
``$ tmux kill-session``.
[ "$", "tmux", "kill", "-", "session", "." ]
python
train
bitesofcode/projexui
projexui/widgets/xtreewidget/xtreewidget.py
https://github.com/bitesofcode/projexui/blob/f18a73bec84df90b034ca69b9deea118dbedfc4d/projexui/widgets/xtreewidget/xtreewidget.py#L1028-L1038
def highlightByAlternate(self): """ Sets the palette highlighting for this tree widget to use a darker version of the alternate color vs. the standard highlighting. """ palette = QtGui.QApplication.palette() palette.setColor(palette.HighlightedText, palette.color(palette.Text)) clr = palette.color(palette.AlternateBase) palette.setColor(palette.Highlight, clr.darker(110)) self.setPalette(palette)
[ "def", "highlightByAlternate", "(", "self", ")", ":", "palette", "=", "QtGui", ".", "QApplication", ".", "palette", "(", ")", "palette", ".", "setColor", "(", "palette", ".", "HighlightedText", ",", "palette", ".", "color", "(", "palette", ".", "Text", ")"...
Sets the palette highlighting for this tree widget to use a darker version of the alternate color vs. the standard highlighting.
[ "Sets", "the", "palette", "highlighting", "for", "this", "tree", "widget", "to", "use", "a", "darker", "version", "of", "the", "alternate", "color", "vs", ".", "the", "standard", "highlighting", "." ]
python
train
mozilla/python_moztelemetry
moztelemetry/scalar.py
https://github.com/mozilla/python_moztelemetry/blob/09ddf1ec7d953a4308dfdcb0ed968f27bd5921bb/moztelemetry/scalar.py#L85-L106
def _parse_scalars(scalars): """Parse the scalars from the YAML file content to a dictionary of ScalarType(s). :return: A dictionary { 'full.scalar.label': ScalarType } """ scalar_dict = {} # Scalars are defined in a fixed two-level hierarchy within the definition file. # The first level contains the category name, while the second level contains the # probe name (e.g. "category.name: probe: ..."). for category_name in scalars: category = scalars[category_name] for probe_name in category: # We found a scalar type. Go ahead and parse it. scalar_definition = category[probe_name] # We pass |strict_type_checks=False| as we don't want to do any check # server side. This includes skipping the checks for the required keys. scalar_info = ScalarType(category_name, probe_name, scalar_definition, strict_type_checks=False) scalar_dict[scalar_info.label] = scalar_info return scalar_dict
[ "def", "_parse_scalars", "(", "scalars", ")", ":", "scalar_dict", "=", "{", "}", "# Scalars are defined in a fixed two-level hierarchy within the definition file.", "# The first level contains the category name, while the second level contains the", "# probe name (e.g. \"category.name: probe...
Parse the scalars from the YAML file content to a dictionary of ScalarType(s). :return: A dictionary { 'full.scalar.label': ScalarType }
[ "Parse", "the", "scalars", "from", "the", "YAML", "file", "content", "to", "a", "dictionary", "of", "ScalarType", "(", "s", ")", ".", ":", "return", ":", "A", "dictionary", "{", "full", ".", "scalar", ".", "label", ":", "ScalarType", "}" ]
python
train
underworldcode/stripy
stripy-src/stripy/cartesian.py
https://github.com/underworldcode/stripy/blob/d4c3480c3e58c88489ded695eadbe7cd5bf94b48/stripy-src/stripy/cartesian.py#L871-L886
def areas(self): """ Compute the area of each triangle within the triangulation of points. Returns ------- area : array of floats, shape (nt,) area of each triangle in self.simplices where nt is the number of triangles. """ v1 = self.points[self.simplices[:,1]] - self.points[self.simplices[:,0]] v2 = self.points[self.simplices[:,2]] - self.points[self.simplices[:,1]] area = 0.5*(v1[:,0]*v2[:,1] - v1[:,1]*v2[:,0]) return area
[ "def", "areas", "(", "self", ")", ":", "v1", "=", "self", ".", "points", "[", "self", ".", "simplices", "[", ":", ",", "1", "]", "]", "-", "self", ".", "points", "[", "self", ".", "simplices", "[", ":", ",", "0", "]", "]", "v2", "=", "self", ...
Compute the area of each triangle within the triangulation of points. Returns ------- area : array of floats, shape (nt,) area of each triangle in self.simplices where nt is the number of triangles.
[ "Compute", "the", "area", "of", "each", "triangle", "within", "the", "triangulation", "of", "points", "." ]
python
train
edeposit/edeposit.amqp.ftp
src/edeposit/amqp/ftp/passwd_reader.py
https://github.com/edeposit/edeposit.amqp.ftp/blob/fcdcbffb6e5d194e1bb4f85f0b8eaa9dbb08aa71/src/edeposit/amqp/ftp/passwd_reader.py#L180-L187
def save_user_config(username, conf_dict, path=settings.LOGIN_FILE): """ Save user's configuration to otherwise unused field ``full_name`` in passwd file. """ users = load_users(path=path) users[username]["full_name"] = _encode_config(conf_dict) save_users(users, path=path)
[ "def", "save_user_config", "(", "username", ",", "conf_dict", ",", "path", "=", "settings", ".", "LOGIN_FILE", ")", ":", "users", "=", "load_users", "(", "path", "=", "path", ")", "users", "[", "username", "]", "[", "\"full_name\"", "]", "=", "_encode_conf...
Save user's configuration to otherwise unused field ``full_name`` in passwd file.
[ "Save", "user", "s", "configuration", "to", "otherwise", "unused", "field", "full_name", "in", "passwd", "file", "." ]
python
train
spacetelescope/drizzlepac
drizzlepac/imageObject.py
https://github.com/spacetelescope/drizzlepac/blob/15bec3c929a6a869d9e71b9398ced43ede0620f1/drizzlepac/imageObject.py#L604-L615
def getdarkimg(self,chip): """ Notes ===== Return an array representing the dark image for the detector. The method will return an array of the same shape as the image. :units: electrons """ sci_chip = self._image[self.scienceExt,chip] return np.ones(sci_chip.image_shape,dtype=sci_chip.image_dtype)*sci_chip.darkcurrent
[ "def", "getdarkimg", "(", "self", ",", "chip", ")", ":", "sci_chip", "=", "self", ".", "_image", "[", "self", ".", "scienceExt", ",", "chip", "]", "return", "np", ".", "ones", "(", "sci_chip", ".", "image_shape", ",", "dtype", "=", "sci_chip", ".", "...
Notes ===== Return an array representing the dark image for the detector. The method will return an array of the same shape as the image. :units: electrons
[ "Notes", "=====", "Return", "an", "array", "representing", "the", "dark", "image", "for", "the", "detector", "." ]
python
train
bwohlberg/sporco
sporco/admm/parcbpdn.py
https://github.com/bwohlberg/sporco/blob/8946a04331106f4e39904fbdf2dc7351900baa04/sporco/admm/parcbpdn.py#L181-L188
def y0astep(): r"""The serial component of the step to minimise the augmented Lagrangian with respect to :math:`\mathbf{y}_0`. """ global mp_b mp_b[:] = mp_inv_off_diag * np.sum((mp_S + mp_rho*(mp_DX+mp_U0)), axis=mp_axisM, keepdims=True)
[ "def", "y0astep", "(", ")", ":", "global", "mp_b", "mp_b", "[", ":", "]", "=", "mp_inv_off_diag", "*", "np", ".", "sum", "(", "(", "mp_S", "+", "mp_rho", "*", "(", "mp_DX", "+", "mp_U0", ")", ")", ",", "axis", "=", "mp_axisM", ",", "keepdims", "=...
r"""The serial component of the step to minimise the augmented Lagrangian with respect to :math:`\mathbf{y}_0`.
[ "r", "The", "serial", "component", "of", "the", "step", "to", "minimise", "the", "augmented", "Lagrangian", "with", "respect", "to", ":", "math", ":", "\\", "mathbf", "{", "y", "}", "_0", "." ]
python
train
mikedh/trimesh
trimesh/exchange/dae.py
https://github.com/mikedh/trimesh/blob/25e059bf6d4caa74f62ffd58ce4f61a90ee4e518/trimesh/exchange/dae.py#L366-L400
def load_zae(file_obj, resolver=None, **kwargs): """ Load a ZAE file, which is just a zipped DAE file. Parameters ------------- file_obj : file object Contains ZAE data resolver : trimesh.visual.Resolver Resolver to load additional assets kwargs : dict Passed to load_collada Returns ------------ loaded : dict Results of loading """ # a dict, {file name : file object} archive = util.decompress(file_obj, file_type='zip') # load the first file with a .dae extension file_name = next(i for i in archive.keys() if i.lower().endswith('.dae')) # a resolver so the loader can load textures / etc resolver = visual.resolvers.ZipResolver(archive) # run the regular collada loader loaded = load_collada(archive[file_name], resolver=resolver, **kwargs) return loaded
[ "def", "load_zae", "(", "file_obj", ",", "resolver", "=", "None", ",", "*", "*", "kwargs", ")", ":", "# a dict, {file name : file object}", "archive", "=", "util", ".", "decompress", "(", "file_obj", ",", "file_type", "=", "'zip'", ")", "# load the first file wi...
Load a ZAE file, which is just a zipped DAE file. Parameters ------------- file_obj : file object Contains ZAE data resolver : trimesh.visual.Resolver Resolver to load additional assets kwargs : dict Passed to load_collada Returns ------------ loaded : dict Results of loading
[ "Load", "a", "ZAE", "file", "which", "is", "just", "a", "zipped", "DAE", "file", "." ]
python
train
tcalmant/ipopo
pelix/framework.py
https://github.com/tcalmant/ipopo/blob/2f9ae0c44cd9c34ef1a9d50837b3254e75678eb1/pelix/framework.py#L1371-L1381
def update(self): """ Stops and starts the framework, if the framework is active. :raise BundleException: Something wrong occurred while stopping or starting the framework. """ with self._lock: if self._state == Bundle.ACTIVE: self.stop() self.start()
[ "def", "update", "(", "self", ")", ":", "with", "self", ".", "_lock", ":", "if", "self", ".", "_state", "==", "Bundle", ".", "ACTIVE", ":", "self", ".", "stop", "(", ")", "self", ".", "start", "(", ")" ]
Stops and starts the framework, if the framework is active. :raise BundleException: Something wrong occurred while stopping or starting the framework.
[ "Stops", "and", "starts", "the", "framework", "if", "the", "framework", "is", "active", "." ]
python
train
JasonKessler/scattertext
scattertext/TermDocMatrixWithoutCategories.py
https://github.com/JasonKessler/scattertext/blob/cacf1f687d218ee8cae3fc05cc901db824bb1b81/scattertext/TermDocMatrixWithoutCategories.py#L101-L120
def set_background_corpus(self, background):
    '''
    Set the background corpus used for term frequency comparisons.

    Parameters
    ----------
    background : TermDocMatrixWithoutCategories subclass instance, or a
        pd.DataFrame with exactly the columns 'word' and 'background'
    '''
    if issubclass(type(background), TermDocMatrixWithoutCategories):
        # collapse the term-document matrix into one total count per term
        totals = background.get_term_freq_df().sum(axis=1)
        frame = pd.DataFrame(totals, columns=['background']).reset_index()
        frame.columns = ['word', 'background']
        self._background_corpus = frame
    elif (type(background) == pd.DataFrame
          and set(background.columns) == set(['word', 'background'])):
        # already in the expected two-column shape; store as-is
        self._background_corpus = background
    else:
        raise Exception('The argument named background must be a subclass of TermDocMatrix or a '
                        'DataFrame with columns "word" and "background", where "word" '
                        'is the term text, and "background" is its frequency.')
[ "def", "set_background_corpus", "(", "self", ",", "background", ")", ":", "if", "issubclass", "(", "type", "(", "background", ")", ",", "TermDocMatrixWithoutCategories", ")", ":", "self", ".", "_background_corpus", "=", "pd", ".", "DataFrame", "(", "background",...
Parameters ---------- background
[ "Parameters", "----------", "background" ]
python
train
dmlc/gluon-nlp
scripts/bert/fp16_utils.py
https://github.com/dmlc/gluon-nlp/blob/4b83eb6bcc8881e5f1081a3675adaa19fac5c0ba/scripts/bert/fp16_utils.py#L26-L107
def grad_global_norm(parameters, max_norm): """Calculate the 2-norm of gradients of parameters, and how much they should be scaled down such that their 2-norm does not exceed `max_norm`. If gradients exist for more than one context for a parameter, user needs to explicitly call ``trainer.allreduce_grads`` so that the gradients are summed first before calculating the 2-norm. .. note:: This function is only for use when `update_on_kvstore` is set to False in trainer. Example:: trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...) for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]): with mx.autograd.record(): y = net(x) loss = loss_fn(y, label) loss.backward() trainer.allreduce_grads() norm, ratio = grad_global_norm(net.collect_params().values(), max_norm) trainer.update(batch_size * ratio) ... Parameters ---------- parameters : list of Parameters Returns ------- NDArray Total norm. Shape is (1,) NDArray Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio. If total norm is NaN, ratio will be NaN, too. Shape is (1,) NDArray Whether the total norm is finite. Shape is (1,) """ # collect gradient arrays arrays = [] idx = 0 for p in parameters: if p.grad_req != 'null': p_grads = p.list_grad() arrays.append(p_grads[idx % len(p_grads)]) idx += 1 assert len(arrays) > 0, 'No parameter found available for gradient norm.' # compute gradient norms def _norm(array): # TODO(haibin) norm operator does not support fp16 safe reduction. 
# Issue is tracked at: https://github.com/apache/incubator-mxnet/issues/14126 x = array.reshape((-1,)).astype('float32', copy=False) return nd.dot(x, x) norm_arrays = [_norm(arr) for arr in arrays] # group norm arrays by ctx def group_by_ctx(arr_list): groups = collections.defaultdict(list) for arr in arr_list: ctx = arr.context groups[ctx].append(arr) return groups norm_groups = group_by_ctx(norm_arrays) # reduce ctx, dtype = arrays[0].context, 'float32' norms = [nd.add_n(*g).as_in_context(ctx) for g in norm_groups.values()] total_norm = nd.add_n(*norms).sqrt() scale = total_norm / max_norm # is_finite = 0 if NaN or Inf, 1 otherwise. is_finite = nd.contrib.isfinite(scale) # if scale is finite, nd.maximum selects the max between scale and 1. That is, # 1 is returned if total_norm does not exceed max_norm. # if scale = NaN or Inf, the result of nd.minimum is undefined. Therefore, we use # choices.take to return NaN or Inf. scale_or_one = nd.maximum(nd.ones((1,), dtype=dtype, ctx=ctx), scale) choices = nd.concat(scale, scale_or_one, dim=0) chosen_scale = choices.take(is_finite) return total_norm, chosen_scale, is_finite
[ "def", "grad_global_norm", "(", "parameters", ",", "max_norm", ")", ":", "# collect gradient arrays", "arrays", "=", "[", "]", "idx", "=", "0", "for", "p", "in", "parameters", ":", "if", "p", ".", "grad_req", "!=", "'null'", ":", "p_grads", "=", "p", "."...
Calculate the 2-norm of gradients of parameters, and how much they should be scaled down such that their 2-norm does not exceed `max_norm`. If gradients exist for more than one context for a parameter, user needs to explicitly call ``trainer.allreduce_grads`` so that the gradients are summed first before calculating the 2-norm. .. note:: This function is only for use when `update_on_kvstore` is set to False in trainer. Example:: trainer = Trainer(net.collect_params(), update_on_kvstore=False, ...) for x, y in mx.gluon.utils.split_and_load(X, [mx.gpu(0), mx.gpu(1)]): with mx.autograd.record(): y = net(x) loss = loss_fn(y, label) loss.backward() trainer.allreduce_grads() norm, ratio = grad_global_norm(net.collect_params().values(), max_norm) trainer.update(batch_size * ratio) ... Parameters ---------- parameters : list of Parameters Returns ------- NDArray Total norm. Shape is (1,) NDArray Ratio for rescaling gradients based on max_norm s.t. grad = grad / ratio. If total norm is NaN, ratio will be NaN, too. Shape is (1,) NDArray Whether the total norm is finite. Shape is (1,)
[ "Calculate", "the", "2", "-", "norm", "of", "gradients", "of", "parameters", "and", "how", "much", "they", "should", "be", "scaled", "down", "such", "that", "their", "2", "-", "norm", "does", "not", "exceed", "max_norm", "." ]
python
train
hobson/aima
aima/games.py
https://github.com/hobson/aima/blob/3572b2fb92039b4a1abe384be8545560fbd3d470/aima/games.py#L10-L34
def minimax_decision(state, game):
    """Given a state in a game, calculate the best move by searching
    forward all the way to the terminal states. [Fig. 5.3]"""
    player = game.to_move(state)

    def max_value(s):
        # maximizing player's turn: pick the highest achievable utility
        if game.terminal_test(s):
            return game.utility(s, player)
        best = -infinity
        for action in game.actions(s):
            best = max(best, min_value(game.result(s, action)))
        return best

    def min_value(s):
        # minimizing player's turn: pick the lowest achievable utility
        if game.terminal_test(s):
            return game.utility(s, player)
        worst = infinity
        for action in game.actions(s):
            worst = min(worst, max_value(game.result(s, action)))
        return worst

    # choose the action whose worst-case (opponent-optimal) outcome is best
    return argmax(game.actions(state),
                  lambda a: min_value(game.result(state, a)))
[ "def", "minimax_decision", "(", "state", ",", "game", ")", ":", "player", "=", "game", ".", "to_move", "(", "state", ")", "def", "max_value", "(", "state", ")", ":", "if", "game", ".", "terminal_test", "(", "state", ")", ":", "return", "game", ".", "...
Given a state in a game, calculate the best move by searching forward all the way to the terminal states. [Fig. 5.3]
[ "Given", "a", "state", "in", "a", "game", "calculate", "the", "best", "move", "by", "searching", "forward", "all", "the", "way", "to", "the", "terminal", "states", ".", "[", "Fig", ".", "5", ".", "3", "]" ]
python
valid
sorgerlab/indra
indra/belief/wm_scorer.py
https://github.com/sorgerlab/indra/blob/79a70415832c5702d7a820c7c9ccc8e25010124b/indra/belief/wm_scorer.py#L38-L57
def get_eidos_scorer():
    """Return a SimpleScorer based on Eidos curated precision estimates."""
    table = load_eidos_curation_table()
    # Overall precision: count-weighted average of per-rule correctness
    total_num = table['COUNT of RULE'].sum()
    weighted_sum = table['COUNT of RULE'].dot(table['% correct'])
    precision = weighted_sum / total_num
    # Split the total error into systematic and random parts, for now in an
    # ad-hoc manner: 5% is attributed to systematic error
    syst_error = 0.05
    rand_error = 1 - precision - syst_error
    prior_probs = {'rand': {'eidos': rand_error},
                   'syst': {'eidos': syst_error}}
    # Per-rule random error, capping each rule's correctness at 0.95
    subtype_probs = {'eidos': {rule: 1.0 - min(corr, 0.95) - syst_error
                               for rule, corr in zip(table['RULE'],
                                                     table['% correct'])}}
    return SimpleScorer(prior_probs, subtype_probs)
[ "def", "get_eidos_scorer", "(", ")", ":", "table", "=", "load_eidos_curation_table", "(", ")", "# Get the overall precision", "total_num", "=", "table", "[", "'COUNT of RULE'", "]", ".", "sum", "(", ")", "weighted_sum", "=", "table", "[", "'COUNT of RULE'", "]", ...
Return a SimpleScorer based on Eidos curated precision estimates.
[ "Return", "a", "SimpleScorer", "based", "on", "Eidos", "curated", "precision", "estimates", "." ]
python
train
ehazlett/ignition
ignition/django.py
https://github.com/ehazlett/ignition/blob/618776fccd199c4613e105ee55955b40e52d3e68/ignition/django.py#L39-L59
def create_project(self):
    '''
    Creates a base Django project inside the application directory.

    Skips creation if the project directory already exists, unless the
    instance was configured with force enabled, in which case the existing
    directory is removed first.
    '''
    # the virtualenv's Python interpreter must exist before anything is done
    if os.path.exists(self._py):
        prj_dir = os.path.join(self._app_dir, self._project_name)
        if os.path.exists(prj_dir):
            if self._force:
                logging.warn('Removing existing project')
                shutil.rmtree(prj_dir)
            else:
                logging.warn('Found existing project; not creating (use --force to overwrite)')
                return
        logging.info('Creating project')
        # NOTE(review): shell=True with interpolated paths — assumes app dir
        # and project name contain no shell metacharacters; verify inputs
        # are trusted before reuse.
        p = subprocess.Popen('cd {0} ; {1} startproject {2} > /dev/null'.format(self._app_dir,
            self._ve_dir + os.sep + self._project_name + \
            os.sep + 'bin' + os.sep + 'django-admin.py', self._project_name), \
            shell=True)
        # block until django-admin finishes
        os.waitpid(p.pid, 0)
    else:
        logging.error('Unable to find Python interpreter in virtualenv')
        return
[ "def", "create_project", "(", "self", ")", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "_py", ")", ":", "prj_dir", "=", "os", ".", "path", ".", "join", "(", "self", ".", "_app_dir", ",", "self", ".", "_project_name", ")", "if", ...
Creates a base Django project
[ "Creates", "a", "base", "Django", "project" ]
python
valid
etal/biocma
biocma/cma.py
https://github.com/etal/biocma/blob/eac0c57eb83a9498e53ccdeb9cbc3fe21a5826a7/biocma/cma.py#L78-L141
def _parse_sequences(ilines, expect_qlen):
    """Parse the sequences in the current block.

    Yields one dict per successfully parsed sequence record; records whose
    pre-header, header or body cannot be parsed are logged and skipped.

    Sequence looks like:

    $3=227(209):
    >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma
    subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma
    subunit [Aquifex aeolicus VF5] >gi|75
    {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
    """
    while True:
        first = next(ilines)
        if first.startswith('_') and first.endswith('].'):
            # End of sequences & end of block
            break
        # ENH: handle wrapped lines?
        try:
            # pre-header: record index, this sequence's length, query length
            index, this_len, query_len = _parse_seq_preheader(first)
        except ValueError:
            logging.warn('Unparseable line (SKIPPING):\n%s', first)
            continue
        # header line: identifier, cross-refs, flank lengths, taxonomy info
        (rec_id, dbxrefs, headlen, taillen, phylum, taxchar, description
        ) = _parse_seq_header(next(ilines))
        try:
            # body line: head flank, aligned molecule sequence, tail flank
            headseq, molseq, tailseq = _parse_seq_body(next(ilines))
        except ValueError:
            logging.warn('Unparseable sequence: %s -- SKIPPING', rec_id)
            continue
        # Validation -- warn on mismatches but keep the record
        if expect_qlen != query_len:
            logging.warn("Query length in %s given as %d; expected %d",
                         rec_id, query_len, expect_qlen)
        # missing flank sequence and length both absent -> treat as zero
        if not headseq and not headlen:
            headlen = 0
        if not tailseq and not taillen:
            taillen = 0
        if headseq:
            if headlen is None:
                headlen = len(headseq)
            elif headlen != len(headseq):
                logging.warn("Conflicting head flank lengths in %s: %d, %d",
                             rec_id, headlen, len(headseq))
        if tailseq:
            if taillen is None:
                taillen = len(tailseq)
            elif taillen != len(tailseq):
                logging.warn("Conflicting tail flank lengths in %s: %d, %d",
                             rec_id, taillen, len(tailseq))
        yield {'index': index,
               'id': rec_id,
               'description': description,
               'dbxrefs': dbxrefs,
               'phylum': phylum,
               'taxchar': taxchar,
               'head_len': headlen,
               'tail_len': taillen,
               'head_seq': headseq,
               'tail_seq': tailseq,
               'length': this_len,
               'seq': molseq,
              }
[ "def", "_parse_sequences", "(", "ilines", ",", "expect_qlen", ")", ":", "while", "True", ":", "first", "=", "next", "(", "ilines", ")", "if", "first", ".", "startswith", "(", "'_'", ")", "and", "first", ".", "endswith", "(", "'].'", ")", ":", "# End of...
Parse the sequences in the current block. Sequence looks like: $3=227(209): >gi|15606894|ref|NP_214275.1| {|2(244)|<Aquificae(B)>}DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|2984127|gb|AAC07663.1| DNA polymerase III gamma subunit [Aquifex aeolicus VF5] >gi|75 {()YVPFARKYRPKFFREVIGQEAPVRILKNAIKNDRVAHaYLFAGPRGVGKTTIARILAKALNcknpskgepcgecencreiDRGVFPDLIEMDAASNRGIDDVRA-LKEAVNYKPIKG-KYKVYIIDEAHMLTKEAFNALLKTLEEPPPRTVFVLCTTEYDKILPTILSRCQRIIFSKVRKEKVIEYLKKICEKEGIECEEGALEVLAHASEGCMRDAASLLDQASVYGE()}*
[ "Parse", "the", "sequences", "in", "the", "current", "block", "." ]
python
train
data-8/datascience
datascience/tables.py
https://github.com/data-8/datascience/blob/4cee38266903ca169cea4a53b8cc39502d85c464/datascience/tables.py#L89-L92
def from_rows(cls, rows, labels):
    """Create a table from a sequence of rows (fixed-length sequences).

    [Deprecated] Use ``Table(labels).with_rows(...)`` instead.
    """
    warnings.warn("Table.from_rows is deprecated. Use Table(labels).with_rows(...)", FutureWarning)
    table = cls(labels)
    return table.with_rows(rows)
[ "def", "from_rows", "(", "cls", ",", "rows", ",", "labels", ")", ":", "warnings", ".", "warn", "(", "\"Table.from_rows is deprecated. Use Table(labels).with_rows(...)\"", ",", "FutureWarning", ")", "return", "cls", "(", "labels", ")", ".", "with_rows", "(", "rows"...
Create a table from a sequence of rows (fixed-length sequences). [Deprecated]
[ "Create", "a", "table", "from", "a", "sequence", "of", "rows", "(", "fixed", "-", "length", "sequences", ")", ".", "[", "Deprecated", "]" ]
python
train
mongodb/mongo-python-driver
bson/__init__.py
https://github.com/mongodb/mongo-python-driver/blob/c29c21449e3aae74154207058cf85fd94018d4cd/bson/__init__.py#L969-L1003
def _decode_all_selective(data, codec_options, fields):
    """Decode BSON data to a single document while using user-provided
    custom decoding logic.

    `data` must be a string representing a valid, BSON-encoded document.

    :Parameters:
      - `data`: BSON data
      - `codec_options`: An instance of
        :class:`~bson.codec_options.CodecOptions` with user-specified type
        decoders. If no decoders are found, this method is the same as
        ``decode_all``.
      - `fields`: Map of document namespaces where data that needs
        to be custom decoded lives or None. For example, to custom decode a
        list of objects in 'field1.subfield1', the specified value should be
        ``{'field1': {'subfield1': 1}}``. If ``fields``  is an empty map or
        None, this method is the same as ``decode_all``.

    :Returns:
      - `document_list`: Single-member list containing the decoded document.

    .. versionadded:: 3.8
    """
    # no custom decoders registered: plain decode
    if not codec_options.type_registry._decoder_map:
        return decode_all(data, codec_options)

    # custom decoders exist but no target fields: decode without them
    if not fields:
        return decode_all(data, codec_options.with_options(type_registry=None))

    # Decode documents for internal use.
    # first pass uses RawBSONDocument so only the selected fields are decoded
    # with the user's type registry in the second, selective pass
    from bson.raw_bson import RawBSONDocument
    internal_codec_options = codec_options.with_options(
        document_class=RawBSONDocument, type_registry=None)
    _doc = _bson_to_dict(data, internal_codec_options)
    return [_decode_selective(_doc, fields, codec_options,)]
[ "def", "_decode_all_selective", "(", "data", ",", "codec_options", ",", "fields", ")", ":", "if", "not", "codec_options", ".", "type_registry", ".", "_decoder_map", ":", "return", "decode_all", "(", "data", ",", "codec_options", ")", "if", "not", "fields", ":"...
Decode BSON data to a single document while using user-provided custom decoding logic. `data` must be a string representing a valid, BSON-encoded document. :Parameters: - `data`: BSON data - `codec_options`: An instance of :class:`~bson.codec_options.CodecOptions` with user-specified type decoders. If no decoders are found, this method is the same as ``decode_all``. - `fields`: Map of document namespaces where data that needs to be custom decoded lives or None. For example, to custom decode a list of objects in 'field1.subfield1', the specified value should be ``{'field1': {'subfield1': 1}}``. If ``fields`` is an empty map or None, this method is the same as ``decode_all``. :Returns: - `document_list`: Single-member list containing the decoded document. .. versionadded:: 3.8
[ "Decode", "BSON", "data", "to", "a", "single", "document", "while", "using", "user", "-", "provided", "custom", "decoding", "logic", "." ]
python
train
juju/python-libjuju
juju/client/_client1.py
https://github.com/juju/python-libjuju/blob/58f0011f4c57cd68830258952fa952eaadca6b38/juju/client/_client1.py#L5086-L5099
async def ReportKeys(self, entity_keys):
    '''
    entity_keys : typing.Sequence[~SSHHostKeys]
    Returns -> typing.Sequence[~ErrorResult]
    '''
    # build the RPC message for the HostKeyReporter facade (version 1)
    _params = dict()
    _params['entity-keys'] = entity_keys
    msg = dict(type='HostKeyReporter',
               request='ReportKeys',
               version=1,
               params=_params)
    # dispatch and hand back the raw reply
    reply = await self.rpc(msg)
    return reply
[ "async", "def", "ReportKeys", "(", "self", ",", "entity_keys", ")", ":", "# map input types to rpc msg", "_params", "=", "dict", "(", ")", "msg", "=", "dict", "(", "type", "=", "'HostKeyReporter'", ",", "request", "=", "'ReportKeys'", ",", "version", "=", "1...
entity_keys : typing.Sequence[~SSHHostKeys] Returns -> typing.Sequence[~ErrorResult]
[ "entity_keys", ":", "typing", ".", "Sequence", "[", "~SSHHostKeys", "]", "Returns", "-", ">", "typing", ".", "Sequence", "[", "~ErrorResult", "]" ]
python
train
jciskey/pygraph
pygraph/classes/directed_graph.py
https://github.com/jciskey/pygraph/blob/037bb2f32503fecb60d62921f9766d54109f15e2/pygraph/classes/directed_graph.py#L217-L223
def move_edge_target(self, edge_id, node_a):
    """Moves an edge so that it targets node_a."""
    # look up the edge record
    edge = self.get_edge(edge_id)
    # keep the source vertex, swap in the new target
    source = edge['vertices'][0]
    edge['vertices'] = (source, node_a)
[ "def", "move_edge_target", "(", "self", ",", "edge_id", ",", "node_a", ")", ":", "# Grab the edge", "edge", "=", "self", ".", "get_edge", "(", "edge_id", ")", "# Alter the vertices", "edge", "[", "'vertices'", "]", "=", "(", "edge", "[", "'vertices'", "]", ...
Moves an edge so that it targets node_a.
[ "Moves", "an", "edge", "so", "that", "it", "targets", "node_a", "." ]
python
train
stevepeak/debris
debris/tornado/__init__.py
https://github.com/stevepeak/debris/blob/62ffde573a880c022bd4876ec836b32953225eea/debris/tornado/__init__.py#L18-L47
def cached(namespace=None, service="memory", debug=False):
    """
    Wrapper for tornado requests.

    Caches the response body under ``namespace`` using the named backend
    from ``debris.services``; when ``debug`` is True caching is bypassed
    entirely.

    Example
    ```
    class MainHandler(tornado.web.RequestHandler):
        @debris.tornado.cached("home-page")
        def get(self):
            self.write("Hello, world")
    ```
    """
    # resolve the cache backend once, at decoration time
    _service = getattr(debris.services, service)
    def wrapper(_f):
        @functools.wraps(_f)
        def _stash(self, *a, **k):
            if debug is False:
                # this request is cacheable
                try:
                    # cache hit: finish the request with the stored body
                    self.finish(_service.get(namespace))
                except LookupError:
                    # cache miss: swap in a finish() that also stores the
                    # body, then run the real handler
                    _replace_finish(self, namespace, _service)
                    # get the result of this request
                    _f(self, *a, **k)
                return
            # request is not cacheable
            _f(self, *a, **k)
        return _stash
    return wrapper
[ "def", "cached", "(", "namespace", "=", "None", ",", "service", "=", "\"memory\"", ",", "debug", "=", "False", ")", ":", "_service", "=", "getattr", "(", "debris", ".", "services", ",", "service", ")", "def", "wrapper", "(", "_f", ")", ":", "@", "fun...
Wrapper for tornado requests. Example ``` class MainHandler(tornado.web.RequestHandler): @debris.tornado.cached("home-page") def get(self): self.write("Hello, world") ```
[ "Wrapper", "for", "tornado", "requests", ".", "Example" ]
python
train
pygobject/pgi
pgi/overrides/GLib.py
https://github.com/pygobject/pgi/blob/2090435df6241a15ec2a78379a36b738b728652c/pgi/overrides/GLib.py#L342-L394
def unpack(self):
    """Decompose a GVariant into a native Python object."""
    # accessor per single-character leaf type code
    LEAF_ACCESSORS = {
        'b': self.get_boolean,
        'y': self.get_byte,
        'n': self.get_int16,
        'q': self.get_uint16,
        'i': self.get_int32,
        'u': self.get_uint32,
        'x': self.get_int64,
        't': self.get_uint64,
        'h': self.get_handle,
        'd': self.get_double,
        's': self.get_string,
        'o': self.get_string,  # object path
        'g': self.get_string,  # signature
    }

    # simple values
    la = LEAF_ACCESSORS.get(self.get_type_string())
    if la:
        return la()

    # tuple
    if self.get_type_string().startswith('('):
        res = [self.get_child_value(i).unpack()
               for i in range(self.n_children())]
        return tuple(res)

    # dictionary -- must be tested before the generic array case below,
    # since 'a{' also starts with 'a'
    if self.get_type_string().startswith('a{'):
        res = {}
        for i in range(self.n_children()):
            v = self.get_child_value(i)
            res[v.get_child_value(0).unpack()] = v.get_child_value(1).unpack()
        return res

    # array
    if self.get_type_string().startswith('a'):
        return [self.get_child_value(i).unpack()
                for i in range(self.n_children())]

    # variant (just unbox transparently)
    if self.get_type_string().startswith('v'):
        return self.get_variant().unpack()

    # maybe -- None when no value is present
    if self.get_type_string().startswith('m'):
        m = self.get_maybe()
        return m.unpack() if m else None

    raise NotImplementedError('unsupported GVariant type ' +
                              self.get_type_string())
[ "def", "unpack", "(", "self", ")", ":", "LEAF_ACCESSORS", "=", "{", "'b'", ":", "self", ".", "get_boolean", ",", "'y'", ":", "self", ".", "get_byte", ",", "'n'", ":", "self", ".", "get_int16", ",", "'q'", ":", "self", ".", "get_uint16", ",", "'i'", ...
Decompose a GVariant into a native Python object.
[ "Decompose", "a", "GVariant", "into", "a", "native", "Python", "object", "." ]
python
train
volafiled/python-volapi
volapi/file.py
https://github.com/volafiled/python-volapi/blob/5f0bc03dbde703264ac6ed494e2050761f688a3e/volapi/file.py#L156-L159
def timeout(self, duration=3600):
    """Timeout the uploader of this file.

    Requires room ownership; ``duration`` is in seconds (default one hour).
    """
    # only the room owner may do this
    self.room.check_owner()
    self.conn.make_call("timeoutFile", self.fid, duration)
[ "def", "timeout", "(", "self", ",", "duration", "=", "3600", ")", ":", "self", ".", "room", ".", "check_owner", "(", ")", "self", ".", "conn", ".", "make_call", "(", "\"timeoutFile\"", ",", "self", ".", "fid", ",", "duration", ")" ]
Timeout the uploader of this file
[ "Timeout", "the", "uploader", "of", "this", "file" ]
python
train
ray-project/ray
python/ray/function_manager.py
https://github.com/ray-project/ray/blob/4eade036a0505e244c976f36aaa2d64386b5129b/python/ray/function_manager.py#L560-L574
def _publish_actor_class_to_key(self, key, actor_class_info): """Push an actor class definition to Redis. The is factored out as a separate function because it is also called on cached actor class definitions when a worker connects for the first time. Args: key: The key to store the actor class info at. actor_class_info: Information about the actor class. """ # We set the driver ID here because it may not have been available when # the actor class was defined. self._worker.redis_client.hmset(key, actor_class_info) self._worker.redis_client.rpush("Exports", key)
[ "def", "_publish_actor_class_to_key", "(", "self", ",", "key", ",", "actor_class_info", ")", ":", "# We set the driver ID here because it may not have been available when", "# the actor class was defined.", "self", ".", "_worker", ".", "redis_client", ".", "hmset", "(", "key"...
Push an actor class definition to Redis. The is factored out as a separate function because it is also called on cached actor class definitions when a worker connects for the first time. Args: key: The key to store the actor class info at. actor_class_info: Information about the actor class.
[ "Push", "an", "actor", "class", "definition", "to", "Redis", "." ]
python
train
prompt-toolkit/pyvim
pyvim/commands/preview.py
https://github.com/prompt-toolkit/pyvim/blob/5928b53b9d700863c1a06d2181a034a955f94594/pyvim/commands/preview.py#L16-L29
def save(self):
    """
    Back up current editor state.

    Snapshots the display-related options so preview changes can be
    reverted later.
    """
    editor = self.editor
    self._style = editor.current_style
    self._show_line_numbers = editor.show_line_numbers
    self._highlight_search = editor.highlight_search
    self._show_ruler = editor.show_ruler
    self._relative_number = editor.relative_number
    self._cursorcolumn = editor.cursorcolumn
    self._cursorline = editor.cursorline
    self._colorcolumn = editor.colorcolumn
[ "def", "save", "(", "self", ")", ":", "e", "=", "self", ".", "editor", "self", ".", "_style", "=", "e", ".", "current_style", "self", ".", "_show_line_numbers", "=", "e", ".", "show_line_numbers", "self", ".", "_highlight_search", "=", "e", ".", "highlig...
Back up current editor state.
[ "Back", "up", "current", "editor", "state", "." ]
python
train
synw/dataswim
dataswim/data/transform/calculations.py
https://github.com/synw/dataswim/blob/4a4a53f80daa7cd8e8409d76a19ce07296269da2/dataswim/data/transform/calculations.py#L41-L72
def diffp(self, diffcol: str, name: str="Diff"):
    """
    Add a diff column to the main dataframe: calculate the
    diff from the previous value

    :param diffcol: column to diff from
    :type diffcol: str
    :param name: diff column name, defaults to "Diff"
    :type name: str, optional

    :example: ``ds.diffp("Col 1", "New col")``
    """
    try:
        df = self.df.copy()
        previous = 0
        i = 0
        vals = [df[diffcol].iloc[0]]
        for _, row in df.iterrows():
            # difference to the previous row's value, rounded to one decimal
            val = row[diffcol] - previous
            new = round(val, 1)
            previous = row[diffcol]
            if i == 0:
                # the first row has no predecessor: record 0 (this also
                # discards the initial vals placeholder above)
                vals = [0]
            else:
                vals.append(new)
            i = 1
        self.df = df
        # append the computed series as a new column
        self.add(name, vals)
    except Exception as e:
        self.err(e, self._append, "Can not diff column")
        return
    self.ok("Diff column " + name + " added to the dataframe")
[ "def", "diffp", "(", "self", ",", "diffcol", ":", "str", ",", "name", ":", "str", "=", "\"Diff\"", ")", ":", "try", ":", "df", "=", "self", ".", "df", ".", "copy", "(", ")", "previous", "=", "0", "i", "=", "0", "vals", "=", "[", "df", "[", ...
Add a diff column to the main dataframe: calculate the diff from the previous value :param diffcol: column to diff from :type diffcol: str :param name: diff column name, defaults to "Diff" :type name: str, optional :example: ``ds.diffp("Col 1", "New col")``
[ "Add", "a", "diff", "column", "to", "the", "main", "dataframe", ":", "calculate", "the", "diff", "from", "the", "previous", "value" ]
python
train
cakebread/yolk
yolk/yolklib.py
https://github.com/cakebread/yolk/blob/ee8c9f529a542d9c5eff4fe69b9c7906c802e4d8/yolk/yolklib.py#L154-L172
def get_highest_version(versions):
    """
    Returns highest available version for a package in a list of versions
    Uses pkg_resources to parse the versions

    @param versions: List of PyPI package versions
    @type versions: List of strings

    @returns: string of a PyPI package version
    """
    # pair each version string with its parsed form so ordering is
    # version-aware rather than lexicographic
    parsed = [(pkg_resources.parse_version(ver), ver) for ver in versions]
    parsed.sort(reverse=True)
    # highest parsed version comes first
    return parsed[0][1]
[ "def", "get_highest_version", "(", "versions", ")", ":", "sorted_versions", "=", "[", "]", "for", "ver", "in", "versions", ":", "sorted_versions", ".", "append", "(", "(", "pkg_resources", ".", "parse_version", "(", "ver", ")", ",", "ver", ")", ")", "sorte...
Returns highest available version for a package in a list of versions Uses pkg_resources to parse the versions @param versions: List of PyPI package versions @type versions: List of strings @returns: string of a PyPI package version
[ "Returns", "highest", "available", "version", "for", "a", "package", "in", "a", "list", "of", "versions", "Uses", "pkg_resources", "to", "parse", "the", "versions" ]
python
train
splunk/splunk-sdk-python
splunklib/modularinput/scheme.py
https://github.com/splunk/splunk-sdk-python/blob/a245a4eeb93b3621730418008e31715912bcdcd8/splunklib/modularinput/scheme.py#L55-L85
def to_xml(self):
    """Creates an ``ET.Element`` representing self, then returns it.

    :returns root, an ``ET.Element`` representing this scheme.
    """
    root = ET.Element("scheme")
    ET.SubElement(root, "title").text = self.title

    # description is optional; only emit it when defined
    if self.description is not None:
        ET.SubElement(root, "description").text = self.description

    # remaining settings are serialized as lowercase strings
    for tag, value in (("use_external_validation", self.use_external_validation),
                       ("use_single_instance", self.use_single_instance),
                       ("streaming_mode", self.streaming_mode)):
        ET.SubElement(root, tag).text = str(value).lower()

    # arguments live under <endpoint><args>
    endpoint = ET.SubElement(root, "endpoint")
    args = ET.SubElement(endpoint, "args")
    for arg in self.arguments:
        arg.add_to_document(args)

    return root
[ "def", "to_xml", "(", "self", ")", ":", "root", "=", "ET", ".", "Element", "(", "\"scheme\"", ")", "ET", ".", "SubElement", "(", "root", ",", "\"title\"", ")", ".", "text", "=", "self", ".", "title", "# add a description subelement if it's defined", "if", ...
Creates an ``ET.Element`` representing self, then returns it. :returns root, an ``ET.Element`` representing this scheme.
[ "Creates", "an", "ET", ".", "Element", "representing", "self", "then", "returns", "it", "." ]
python
train
benhoff/pluginmanager
pluginmanager/file_manager.py
https://github.com/benhoff/pluginmanager/blob/a8a184f9ebfbb521703492cb88c1dbda4cd04c06/pluginmanager/file_manager.py#L53-L73
def collect_filepaths(self, directories):
    """
    Collects and returns every filepath from each directory in
    `directories` that is filtered through the `file_filters`.
    If no `file_filters` are present, passes every file in directory as
    a result. Always returns a `set` object

    `directories` can be a object or an iterable. Recommend using
    absolute paths.
    """
    # normalize to absolute paths (handles a single dir or an iterable)
    directories = util.to_absolute_paths(directories)
    collected = set()
    for directory in directories:
        candidates = util.get_filepaths_from_dir(directory)
        # keep only paths that pass the configured file filters
        collected.update(set(self._filter_filepaths(candidates)))
    # finally drop anything on the blacklist
    return self._remove_blacklisted(collected)
[ "def", "collect_filepaths", "(", "self", ",", "directories", ")", ":", "plugin_filepaths", "=", "set", "(", ")", "directories", "=", "util", ".", "to_absolute_paths", "(", "directories", ")", "for", "directory", "in", "directories", ":", "filepaths", "=", "uti...
Collects and returns every filepath from each directory in `directories` that is filtered through the `file_filters`. If no `file_filters` are present, passes every file in directory as a result. Always returns a `set` object `directories` can be a object or an iterable. Recommend using absolute paths.
[ "Collects", "and", "returns", "every", "filepath", "from", "each", "directory", "in", "directories", "that", "is", "filtered", "through", "the", "file_filters", ".", "If", "no", "file_filters", "are", "present", "passes", "every", "file", "in", "directory", "as"...
python
train
sdonk/django-admin-ip-restrictor
admin_ip_restrictor/middleware.py
https://github.com/sdonk/django-admin-ip-restrictor/blob/29c948677e52bc416d44fff0f013d1f4ba2cb782/admin_ip_restrictor/middleware.py#L60-L71
def is_blocked(self, ip):
    """Determine if an IP address should be considered blocked.

    An address is allowed when it appears in the explicit allow-list or
    falls inside any allowed CIDR range; otherwise it is blocked.
    """
    allowed = ip in self.allowed_admin_ips
    for cidr in self.allowed_admin_ip_ranges:
        if ipaddress.ip_address(ip) in ipaddress.ip_network(cidr):
            allowed = True
    return not allowed
[ "def", "is_blocked", "(", "self", ",", "ip", ")", ":", "blocked", "=", "True", "if", "ip", "in", "self", ".", "allowed_admin_ips", ":", "blocked", "=", "False", "for", "allowed_range", "in", "self", ".", "allowed_admin_ip_ranges", ":", "if", "ipaddress", "...
Determine if an IP address should be considered blocked.
[ "Determine", "if", "an", "IP", "address", "should", "be", "considered", "blocked", "." ]
python
train
awslabs/mxboard
python/mxboard/writer.py
https://github.com/awslabs/mxboard/blob/36057ff0f05325c9dc2fe046521325bf9d563a88/python/mxboard/writer.py#L482-L546
def add_embedding(self, tag, embedding, labels=None, images=None, global_step=None): """Adds embedding projector data to the event file. It will also create a config file used by the embedding projector in TensorBoard. The folder containing the embedding data is named using the formula: If global_step is not None, the folder name is `tag + '_' + str(global_step).zfill(6)`; else, the folder name is `tag`. For example, tag = 'mnist', global_step = 12, the folder's name is 'mnist_000012'; when global_step = None, the folder's name is 'mnist'. See the following reference for the meanings of labels and images. Ref: https://www.tensorflow.org/versions/r1.2/get_started/embedding_viz Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `embedding`. embedding : MXNet `NDArray` or `numpy.ndarray` A matrix whose each row is the feature vector of a data point. labels : MXNet `NDArray` or `numpy.ndarray` or a list of elements convertible to str. Labels corresponding to the data points in the `embedding`. If the labels are 2D the first row is considered the column names. images : MXNet `NDArray` or `numpy.ndarray` Images of format NCHW corresponding to the data points in the `embedding`. global_step : int Global step value to record. If not set, default to zero. 
""" embedding_shape = embedding.shape if len(embedding_shape) != 2: raise ValueError('expected 2D NDArray as embedding data, while received an array with' ' ndim=%d' % len(embedding_shape)) data_dir = _get_embedding_dir(tag, global_step) save_path = os.path.join(self.get_logdir(), data_dir) try: os.makedirs(save_path) except OSError: logging.warning('embedding dir %s exists, files under this dir will be overwritten', save_path) if labels is not None: if (embedding_shape[0] != len(labels) and (not _is_2D_matrix(labels) or len(labels) != embedding_shape[0] + 1)): raise ValueError('expected equal values of embedding first dim and length of ' 'labels or embedding first dim + 1 for 2d labels ' ', while received %d and %d for each' % (embedding_shape[0], len(labels))) if self._logger is not None: self._logger.info('saved embedding labels to %s', save_path) _make_metadata_tsv(labels, save_path) if images is not None: img_labels_shape = images.shape if embedding_shape[0] != img_labels_shape[0]: raise ValueError('expected equal first dim size of embedding and images,' ' while received %d and %d for each' % (embedding_shape[0], img_labels_shape[0])) if self._logger is not None: self._logger.info('saved embedding images to %s', save_path) _make_sprite_image(images, save_path) if self._logger is not None: self._logger.info('saved embedding data to %s', save_path) _save_embedding_tsv(embedding, save_path) _add_embedding_config(self.get_logdir(), data_dir, labels is not None, images.shape if images is not None else None)
[ "def", "add_embedding", "(", "self", ",", "tag", ",", "embedding", ",", "labels", "=", "None", ",", "images", "=", "None", ",", "global_step", "=", "None", ")", ":", "embedding_shape", "=", "embedding", ".", "shape", "if", "len", "(", "embedding_shape", ...
Adds embedding projector data to the event file. It will also create a config file used by the embedding projector in TensorBoard. The folder containing the embedding data is named using the formula: If global_step is not None, the folder name is `tag + '_' + str(global_step).zfill(6)`; else, the folder name is `tag`. For example, tag = 'mnist', global_step = 12, the folder's name is 'mnist_000012'; when global_step = None, the folder's name is 'mnist'. See the following reference for the meanings of labels and images. Ref: https://www.tensorflow.org/versions/r1.2/get_started/embedding_viz Note: This function internally calls `asnumpy()` for MXNet `NDArray` inputs. Since `asnumpy()` is a blocking function call, this function would block the main thread till it returns. It may consequently affect the performance of async execution of the MXNet engine. Parameters ---------- tag : str Name for the `embedding`. embedding : MXNet `NDArray` or `numpy.ndarray` A matrix whose each row is the feature vector of a data point. labels : MXNet `NDArray` or `numpy.ndarray` or a list of elements convertible to str. Labels corresponding to the data points in the `embedding`. If the labels are 2D the first row is considered the column names. images : MXNet `NDArray` or `numpy.ndarray` Images of format NCHW corresponding to the data points in the `embedding`. global_step : int Global step value to record. If not set, default to zero.
[ "Adds", "embedding", "projector", "data", "to", "the", "event", "file", ".", "It", "will", "also", "create", "a", "config", "file", "used", "by", "the", "embedding", "projector", "in", "TensorBoard", ".", "The", "folder", "containing", "the", "embedding", "d...
python
train
opinkerfi/nago
nago/extensions/plugins.py
https://github.com/opinkerfi/nago/blob/85e1bdd1de0122f56868a483e7599e1b36a439b0/nago/extensions/plugins.py#L13-L19
def get(search="unsigned"): """ List all available plugins""" plugins = [] for i in os.walk('/usr/lib/nagios/plugins'): for f in i[2]: plugins.append(f) return plugins
[ "def", "get", "(", "search", "=", "\"unsigned\"", ")", ":", "plugins", "=", "[", "]", "for", "i", "in", "os", ".", "walk", "(", "'/usr/lib/nagios/plugins'", ")", ":", "for", "f", "in", "i", "[", "2", "]", ":", "plugins", ".", "append", "(", "f", ...
List all available plugins
[ "List", "all", "available", "plugins" ]
python
train
ronaldguillen/wave
wave/reverse.py
https://github.com/ronaldguillen/wave/blob/20bb979c917f7634d8257992e6d449dc751256a9/wave/reverse.py#L15-L32
def preserve_builtin_query_params(url, request=None): """ Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters. """ if request is None: return url overrides = [ api_settings.URL_FORMAT_OVERRIDE, ] for param in overrides: if param and (param in request.GET): value = request.GET[param] url = replace_query_param(url, param, value) return url
[ "def", "preserve_builtin_query_params", "(", "url", ",", "request", "=", "None", ")", ":", "if", "request", "is", "None", ":", "return", "url", "overrides", "=", "[", "api_settings", ".", "URL_FORMAT_OVERRIDE", ",", "]", "for", "param", "in", "overrides", ":...
Given an incoming request, and an outgoing URL representation, append the value of any built-in query parameters.
[ "Given", "an", "incoming", "request", "and", "an", "outgoing", "URL", "representation", "append", "the", "value", "of", "any", "built", "-", "in", "query", "parameters", "." ]
python
train
tensorflow/tensor2tensor
tensor2tensor/models/research/lm_experiments.py
https://github.com/tensorflow/tensor2tensor/blob/272500b6efe353aeb638d2745ed56e519462ca31/tensor2tensor/models/research/lm_experiments.py#L45-L60
def lmx_base(): """Transformer on languagemodel_lm1b32k_packed. 50M Params.""" hparams = transformer.transformer_tpu() # sharing is counterproductive when underparameterized hparams.shared_embedding_and_softmax_weights = False # we judge by log-ppl, so label smoothing hurts. hparams.label_smoothing = 0.0 # This makes the batch size on GPU the same as on TPU for a packed problem # with sequence length 256. # TODO(noam): fix the mess that is the data reading pipeline. hparams.max_length = 256 # larger batch since we only have a decoder hparams.batch_size = 4096 # save some memory so we can have a larger model hparams.activation_dtype = "bfloat16" return hparams
[ "def", "lmx_base", "(", ")", ":", "hparams", "=", "transformer", ".", "transformer_tpu", "(", ")", "# sharing is counterproductive when underparameterized", "hparams", ".", "shared_embedding_and_softmax_weights", "=", "False", "# we judge by log-ppl, so label smoothing hurts.", ...
Transformer on languagemodel_lm1b32k_packed. 50M Params.
[ "Transformer", "on", "languagemodel_lm1b32k_packed", ".", "50M", "Params", "." ]
python
train
cloudify-cosmo/repex
repex.py
https://github.com/cloudify-cosmo/repex/blob/589e442857fa4a99fa88670d7df1a72f983bbd28/repex.py#L72-L83
def _import_yaml(config_file_path): """Return a configuration object """ try: logger.info('Importing config %s...', config_file_path) with open(config_file_path) as config_file: return yaml.safe_load(config_file.read()) except IOError as ex: raise RepexError('{0}: {1} ({2})'.format( ERRORS['config_file_not_found'], config_file_path, ex)) except (yaml.parser.ParserError, yaml.scanner.ScannerError) as ex: raise RepexError('{0} ({1})'.format(ERRORS['invalid_yaml'], ex))
[ "def", "_import_yaml", "(", "config_file_path", ")", ":", "try", ":", "logger", ".", "info", "(", "'Importing config %s...'", ",", "config_file_path", ")", "with", "open", "(", "config_file_path", ")", "as", "config_file", ":", "return", "yaml", ".", "safe_load"...
Return a configuration object
[ "Return", "a", "configuration", "object" ]
python
train
materialsproject/pymatgen
pymatgen/util/provenance.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/util/provenance.py#L300-L340
def from_structures(cls, structures, authors, projects=None, references='', remarks=None, data=None, histories=None, created_at=None): """ A convenience method for getting a list of StructureNL objects by specifying structures and metadata separately. Some of the metadata is applied to all of the structures for ease of use. Args: structures: A list of Structure objects authors: *List* of {"name":'', "email":''} dicts, *list* of Strings as 'John Doe <johndoe@gmail.com>', or a single String with commas separating authors projects: List of Strings ['Project A', 'Project B']. This applies to all structures. references: A String in BibTeX format. Again, this applies to all structures. remarks: List of Strings ['Remark A', 'Remark B'] data: A list of free form dict. Namespaced at the root level with an underscore, e.g. {"_materialsproject":<custom data>} . The length of data should be the same as the list of structures if not None. histories: List of list of dicts - [[{'name':'', 'url':'', 'description':{}}], ...] The length of histories should be the same as the list of structures if not None. created_at: A datetime object """ data = [{}] * len(structures) if data is None else data histories = [[]] * len(structures) if histories is None else \ histories snl_list = [] for i, struct in enumerate(structures): snl = StructureNL(struct, authors, projects=projects, references=references, remarks=remarks, data=data[i], history=histories[i], created_at=created_at) snl_list.append(snl) return snl_list
[ "def", "from_structures", "(", "cls", ",", "structures", ",", "authors", ",", "projects", "=", "None", ",", "references", "=", "''", ",", "remarks", "=", "None", ",", "data", "=", "None", ",", "histories", "=", "None", ",", "created_at", "=", "None", "...
A convenience method for getting a list of StructureNL objects by specifying structures and metadata separately. Some of the metadata is applied to all of the structures for ease of use. Args: structures: A list of Structure objects authors: *List* of {"name":'', "email":''} dicts, *list* of Strings as 'John Doe <johndoe@gmail.com>', or a single String with commas separating authors projects: List of Strings ['Project A', 'Project B']. This applies to all structures. references: A String in BibTeX format. Again, this applies to all structures. remarks: List of Strings ['Remark A', 'Remark B'] data: A list of free form dict. Namespaced at the root level with an underscore, e.g. {"_materialsproject":<custom data>} . The length of data should be the same as the list of structures if not None. histories: List of list of dicts - [[{'name':'', 'url':'', 'description':{}}], ...] The length of histories should be the same as the list of structures if not None. created_at: A datetime object
[ "A", "convenience", "method", "for", "getting", "a", "list", "of", "StructureNL", "objects", "by", "specifying", "structures", "and", "metadata", "separately", ".", "Some", "of", "the", "metadata", "is", "applied", "to", "all", "of", "the", "structures", "for"...
python
train
osrg/ryu
ryu/base/app_manager.py
https://github.com/osrg/ryu/blob/6f906e72c92e10bd0264c9b91a2f7bb85b97780c/ryu/base/app_manager.py#L305-L318
def send_event(self, name, ev, state=None): """ Send the specified event to the RyuApp instance specified by name. """ if name in SERVICE_BRICKS: if isinstance(ev, EventRequestBase): ev.src = self.name LOG.debug("EVENT %s->%s %s", self.name, name, ev.__class__.__name__) SERVICE_BRICKS[name]._send_event(ev, state) else: LOG.debug("EVENT LOST %s->%s %s", self.name, name, ev.__class__.__name__)
[ "def", "send_event", "(", "self", ",", "name", ",", "ev", ",", "state", "=", "None", ")", ":", "if", "name", "in", "SERVICE_BRICKS", ":", "if", "isinstance", "(", "ev", ",", "EventRequestBase", ")", ":", "ev", ".", "src", "=", "self", ".", "name", ...
Send the specified event to the RyuApp instance specified by name.
[ "Send", "the", "specified", "event", "to", "the", "RyuApp", "instance", "specified", "by", "name", "." ]
python
train
django-leonardo/django-leonardo
leonardo/utils/compress_patch.py
https://github.com/django-leonardo/django-leonardo/blob/4b933e1792221a13b4028753d5f1d3499b0816d4/leonardo/utils/compress_patch.py#L37-L62
def compress_monkey_patch(): """patch all compress we need access to variables from widget scss for example we have:: /themes/bootswatch/cyborg/_variables but only if is cyborg active for this reasone we need dynamically append import to every scss file """ from compressor.templatetags import compress as compress_tags from compressor import base as compress_base compress_base.Compressor.filter_input = filter_input compress_base.Compressor.output = output compress_base.Compressor.hunks = hunks compress_base.Compressor.precompile = precompile compress_tags.CompressorMixin.render_compressed = render_compressed from django_pyscss import compressor as pyscss_compressor pyscss_compressor.DjangoScssFilter.input = input
[ "def", "compress_monkey_patch", "(", ")", ":", "from", "compressor", ".", "templatetags", "import", "compress", "as", "compress_tags", "from", "compressor", "import", "base", "as", "compress_base", "compress_base", ".", "Compressor", ".", "filter_input", "=", "filte...
patch all compress we need access to variables from widget scss for example we have:: /themes/bootswatch/cyborg/_variables but only if is cyborg active for this reasone we need dynamically append import to every scss file
[ "patch", "all", "compress" ]
python
train
cloud9ers/gurumate
environment/lib/python2.7/site-packages/psutil/__init__.py
https://github.com/cloud9ers/gurumate/blob/075dc74d1ee62a8c6b7a8bf2b271364f01629d1e/environment/lib/python2.7/site-packages/psutil/__init__.py#L546-L554
def get_memory_percent(self): """Compare physical system memory to process resident memory and calculate process memory utilization as a percentage. """ rss = self._platform_impl.get_memory_info()[0] try: return (rss / float(TOTAL_PHYMEM)) * 100 except ZeroDivisionError: return 0.0
[ "def", "get_memory_percent", "(", "self", ")", ":", "rss", "=", "self", ".", "_platform_impl", ".", "get_memory_info", "(", ")", "[", "0", "]", "try", ":", "return", "(", "rss", "/", "float", "(", "TOTAL_PHYMEM", ")", ")", "*", "100", "except", "ZeroDi...
Compare physical system memory to process resident memory and calculate process memory utilization as a percentage.
[ "Compare", "physical", "system", "memory", "to", "process", "resident", "memory", "and", "calculate", "process", "memory", "utilization", "as", "a", "percentage", "." ]
python
test
PyHDI/Pyverilog
pyverilog/vparser/parser.py
https://github.com/PyHDI/Pyverilog/blob/b852cc5ed6a7a2712e33639f9d9782d0d1587a53/pyverilog/vparser/parser.py#L1332-L1335
def p_levelsig(self, p): 'levelsig : levelsig_base' p[0] = Sens(p[1], 'level', lineno=p.lineno(1)) p.set_lineno(0, p.lineno(1))
[ "def", "p_levelsig", "(", "self", ",", "p", ")", ":", "p", "[", "0", "]", "=", "Sens", "(", "p", "[", "1", "]", ",", "'level'", ",", "lineno", "=", "p", ".", "lineno", "(", "1", ")", ")", "p", ".", "set_lineno", "(", "0", ",", "p", ".", "...
levelsig : levelsig_base
[ "levelsig", ":", "levelsig_base" ]
python
train
postmanlabs/httpbin
httpbin/core.py
https://github.com/postmanlabs/httpbin/blob/f8ec666b4d1b654e4ff6aedd356f510dcac09f83/httpbin/core.py#L573-L644
def redirect_to(): """302/3XX Redirects to the given URL. --- tags: - Redirects produces: - text/html get: parameters: - in: query name: url type: string required: true - in: query name: status_code type: int post: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false patch: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false put: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false responses: 302: description: A redirection. """ args_dict = request.args.items() args = CaseInsensitiveDict(args_dict) # We need to build the response manually and convert to UTF-8 to prevent # werkzeug from "fixing" the URL. This endpoint should set the Location # header to the exact string supplied. response = app.make_response("") response.status_code = 302 if "status_code" in args: status_code = int(args["status_code"]) if status_code >= 300 and status_code < 400: response.status_code = status_code response.headers["Location"] = args["url"].encode("utf-8") return response
[ "def", "redirect_to", "(", ")", ":", "args_dict", "=", "request", ".", "args", ".", "items", "(", ")", "args", "=", "CaseInsensitiveDict", "(", "args_dict", ")", "# We need to build the response manually and convert to UTF-8 to prevent", "# werkzeug from \"fixing\" the URL....
302/3XX Redirects to the given URL. --- tags: - Redirects produces: - text/html get: parameters: - in: query name: url type: string required: true - in: query name: status_code type: int post: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false patch: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false put: consumes: - application/x-www-form-urlencoded parameters: - in: formData name: url type: string required: true - in: formData name: status_code type: int required: false responses: 302: description: A redirection.
[ "302", "/", "3XX", "Redirects", "to", "the", "given", "URL", ".", "---", "tags", ":", "-", "Redirects", "produces", ":", "-", "text", "/", "html", "get", ":", "parameters", ":", "-", "in", ":", "query", "name", ":", "url", "type", ":", "string", "r...
python
train
wmayner/pyphi
pyphi/subsystem.py
https://github.com/wmayner/pyphi/blob/deeca69a084d782a6fde7bf26f59e93b593c5d77/pyphi/subsystem.py#L702-L707
def mie(self, mechanism, purviews=False): """Return the mechanism's maximally-irreducible effect (|MIE|). Alias for |find_mice()| with ``direction`` set to |EFFECT|. """ return self.find_mice(Direction.EFFECT, mechanism, purviews=purviews)
[ "def", "mie", "(", "self", ",", "mechanism", ",", "purviews", "=", "False", ")", ":", "return", "self", ".", "find_mice", "(", "Direction", ".", "EFFECT", ",", "mechanism", ",", "purviews", "=", "purviews", ")" ]
Return the mechanism's maximally-irreducible effect (|MIE|). Alias for |find_mice()| with ``direction`` set to |EFFECT|.
[ "Return", "the", "mechanism", "s", "maximally", "-", "irreducible", "effect", "(", "|MIE|", ")", "." ]
python
train
SCIP-Interfaces/PySCIPOpt
examples/unfinished/scheduling.py
https://github.com/SCIP-Interfaces/PySCIPOpt/blob/9c960b40d94a48b0304d73dbe28b467b9c065abe/examples/unfinished/scheduling.py#L57-L90
def scheduling_time_index(J,p,r,w): """ scheduling_time_index: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the time index formulation Parameters: - J: set of jobs - p[j]: processing time of job j - r[j]: earliest start time of job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved. """ model = Model("scheduling: time index") T = max(r.values()) + sum(p.values()) X = {} # X[j,t]=1 if job j starts processing at time t, 0 otherwise for j in J: for t in range(r[j], T-p[j]+2): X[j,t] = model.addVar(vtype="B", name="x(%s,%s)"%(j,t)) for j in J: model.addCons(quicksum(X[j,t] for t in range(1,T+1) if (j,t) in X) == 1, "JobExecution(%s)"%(j)) for t in range(1,T+1): ind = [(j,t2) for j in J for t2 in range(t-p[j]+1,t+1) if (j,t2) in X] if ind != []: model.addCons(quicksum(X[j,t2] for (j,t2) in ind) <= 1, "MachineUB(%s)"%t) model.setObjective(quicksum((w[j] * (t - 1 + p[j])) * X[j,t] for (j,t) in X), "minimize") model.data = X return model
[ "def", "scheduling_time_index", "(", "J", ",", "p", ",", "r", ",", "w", ")", ":", "model", "=", "Model", "(", "\"scheduling: time index\"", ")", "T", "=", "max", "(", "r", ".", "values", "(", ")", ")", "+", "sum", "(", "p", ".", "values", "(", ")...
scheduling_time_index: model for the one machine total weighted tardiness problem Model for the one machine total weighted tardiness problem using the time index formulation Parameters: - J: set of jobs - p[j]: processing time of job j - r[j]: earliest start time of job j - w[j]: weighted of job j; the objective is the sum of the weighted completion time Returns a model, ready to be solved.
[ "scheduling_time_index", ":", "model", "for", "the", "one", "machine", "total", "weighted", "tardiness", "problem" ]
python
train
ensime/ensime-vim
ensime_shared/client.py
https://github.com/ensime/ensime-vim/blob/caa734e84f002b25446c615706283a74edd4ecfe/ensime_shared/client.py#L324-L330
def send_at_point(self, what, row, col): """Ask the server to perform an operation at a given point.""" pos = self.get_position(row, col) self.send_request( {"typehint": what + "AtPointReq", "file": self._file_info(), "point": pos})
[ "def", "send_at_point", "(", "self", ",", "what", ",", "row", ",", "col", ")", ":", "pos", "=", "self", ".", "get_position", "(", "row", ",", "col", ")", "self", ".", "send_request", "(", "{", "\"typehint\"", ":", "what", "+", "\"AtPointReq\"", ",", ...
Ask the server to perform an operation at a given point.
[ "Ask", "the", "server", "to", "perform", "an", "operation", "at", "a", "given", "point", "." ]
python
train
mcs07/ChemDataExtractor
chemdataextractor/cli/__init__.py
https://github.com/mcs07/ChemDataExtractor/blob/349a3bea965f2073141d62043b89319222e46af1/chemdataextractor/cli/__init__.py#L46-L53
def extract(ctx, input, output): """Run ChemDataExtractor on a document.""" log.info('chemdataextractor.extract') log.info('Reading %s' % input.name) doc = Document.from_file(input, fname=input.name) records = [record.serialize(primitive=True) for record in doc.records] jsonstring = json.dumps(records, indent=2, ensure_ascii=False) output.write(jsonstring)
[ "def", "extract", "(", "ctx", ",", "input", ",", "output", ")", ":", "log", ".", "info", "(", "'chemdataextractor.extract'", ")", "log", ".", "info", "(", "'Reading %s'", "%", "input", ".", "name", ")", "doc", "=", "Document", ".", "from_file", "(", "i...
Run ChemDataExtractor on a document.
[ "Run", "ChemDataExtractor", "on", "a", "document", "." ]
python
train
L3viathan/sql-mojo-parser
sql_mojo_parser/__init__.py
https://github.com/L3viathan/sql-mojo-parser/blob/fc460c42f3fbcc21c6fc08c0aede8e2a5db637f3/sql_mojo_parser/__init__.py#L33-L36
def t_STRING(t): r"'([^'\\]+|\\'|\\\\)*'" t.value = t.value.replace(r'\\', chr(92)).replace(r"\'", r"'")[1:-1] return t
[ "def", "t_STRING", "(", "t", ")", ":", "t", ".", "value", "=", "t", ".", "value", ".", "replace", "(", "r'\\\\'", ",", "chr", "(", "92", ")", ")", ".", "replace", "(", "r\"\\'\"", ",", "r\"'\"", ")", "[", "1", ":", "-", "1", "]", "return", "t...
r"'([^'\\]+|\\'|\\\\)*
[ "r", "(", "[", "^", "\\\\", "]", "+", "|", "\\\\", "|", "\\\\\\\\", ")", "*" ]
python
test
adamcharnock/python-hue-client
hueclient/models/light.py
https://github.com/adamcharnock/python-hue-client/blob/b934d8eab29ad301ff4e43462e37f0f2d4e682e5/hueclient/models/light.py#L65-L71
def set_rgb(self, red, green, blue): """The red/green/blue color value of the light This will be converted and set as the :attr:`xy` value """ x, y = rgb_to_xy(red, green, blue) self.xy = [x, y]
[ "def", "set_rgb", "(", "self", ",", "red", ",", "green", ",", "blue", ")", ":", "x", ",", "y", "=", "rgb_to_xy", "(", "red", ",", "green", ",", "blue", ")", "self", ".", "xy", "=", "[", "x", ",", "y", "]" ]
The red/green/blue color value of the light This will be converted and set as the :attr:`xy` value
[ "The", "red", "/", "green", "/", "blue", "color", "value", "of", "the", "light" ]
python
train
rackerlabs/timid
timid/steps.py
https://github.com/rackerlabs/timid/blob/b1c6aa159ab380a033740f4aa392cf0d125e0ac6/timid/steps.py#L353-L409
def parse_file(cls, ctxt, fname, key=None, step_addr=None): """ Parse a YAML file containing test steps. :param ctxt: The context object. :param fname: The name of the file to parse. :param key: An optional dictionary key. If specified, the file must be a YAML dictionary, and the referenced value will be interpreted as a list of steps. If not provided, the file must be a YAML list, which will be interpreted as the list of steps. :param step_addr: The address of the step in the test configuration. This may be used in the case of includes, for instance. :returns: A list of ``Step`` objects. """ # Load the YAML file try: with open(fname) as f: step_data = yaml.load(f) except Exception as exc: raise ConfigError( 'Failed to read file "%s": %s' % (fname, exc), step_addr, ) # Do we have a key? if key is not None: if (not isinstance(step_data, collections.Mapping) or key not in step_data): raise ConfigError( 'Bad step configuration file "%s": expecting dictionary ' 'with key "%s"' % (fname, key), step_addr, ) # Extract just the step data step_data = step_data[key] # Validate that it's a sequence if not isinstance(step_data, collections.Sequence): addr = ('%s[%s]' % (fname, key)) if key is not None else fname raise ConfigError( 'Bad step configuration sequence at %s: expecting list, ' 'not "%s"' % (addr, step_data.__class__.__name__), step_addr, ) # OK, assemble the step list and return it steps = [] for idx, step_conf in enumerate(step_data): steps.extend(cls.parse_step( ctxt, StepAddress(fname, idx, key), step_conf)) return steps
[ "def", "parse_file", "(", "cls", ",", "ctxt", ",", "fname", ",", "key", "=", "None", ",", "step_addr", "=", "None", ")", ":", "# Load the YAML file", "try", ":", "with", "open", "(", "fname", ")", "as", "f", ":", "step_data", "=", "yaml", ".", "load"...
Parse a YAML file containing test steps. :param ctxt: The context object. :param fname: The name of the file to parse. :param key: An optional dictionary key. If specified, the file must be a YAML dictionary, and the referenced value will be interpreted as a list of steps. If not provided, the file must be a YAML list, which will be interpreted as the list of steps. :param step_addr: The address of the step in the test configuration. This may be used in the case of includes, for instance. :returns: A list of ``Step`` objects.
[ "Parse", "a", "YAML", "file", "containing", "test", "steps", "." ]
python
test
mfcloud/python-zvm-sdk
smtLayer/powerVM.py
https://github.com/mfcloud/python-zvm-sdk/blob/de9994ceca764f5460ce51bd74237986341d8e3c/smtLayer/powerVM.py#L487-L512
def pause(rh): """ Pause a virtual machine. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'PAUSE' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error """ rh.printSysLog("Enter powerVM.pause, userid: " + rh.userid) parms = ["-T", rh.userid, "-k", "PAUSE=YES"] results = invokeSMCLI(rh, "Image_Pause", parms) if results['overallRC'] != 0: # SMAPI API failed. rh.printLn("ES", results['response']) rh.updateResults(results) # Use results from invokeSMCLI rh.printSysLog("Exit powerVM.pause, rc: " + str(rh.results['overallRC'])) return rh.results['overallRC']
[ "def", "pause", "(", "rh", ")", ":", "rh", ".", "printSysLog", "(", "\"Enter powerVM.pause, userid: \"", "+", "rh", ".", "userid", ")", "parms", "=", "[", "\"-T\"", ",", "rh", ".", "userid", ",", "\"-k\"", ",", "\"PAUSE=YES\"", "]", "results", "=", "invo...
Pause a virtual machine. Input: Request Handle with the following properties: function - 'POWERVM' subfunction - 'PAUSE' userid - userid of the virtual machine Output: Request Handle updated with the results. Return code - 0: ok, non-zero: error
[ "Pause", "a", "virtual", "machine", "." ]
python
train
Pringley/spyglass
spyglass/scraper.py
https://github.com/Pringley/spyglass/blob/091d74f34837673af936daa9f462ad8216be9916/spyglass/scraper.py#L15-L37
def top(self, n=10, cache=None, prefetch=False): """Find the most popular torrents. Return an array of Torrent objects representing the top n torrents. If the cache option is non-None, override the Scraper's default caching settings. Use the prefetch option to hit each Torrent's info page up front (instead of lazy fetching the info on-demand later). """ use_cache = self._use_cache(cache) if use_cache and len(self._top_cache) >= n: return self._top_cache[:n] soup = get(TOP).soup links = soup.find_all("a", class_="detLink")[:n] urls = [urlparse.urljoin(TOP, link.get('href')) for link in links] torrents = [self.torrent_from_url(url, use_cache, prefetch) for url in urls] if use_cache: self._top_cache = torrents self._add_to_torrent_cache(torrents) return torrents
[ "def", "top", "(", "self", ",", "n", "=", "10", ",", "cache", "=", "None", ",", "prefetch", "=", "False", ")", ":", "use_cache", "=", "self", ".", "_use_cache", "(", "cache", ")", "if", "use_cache", "and", "len", "(", "self", ".", "_top_cache", ")"...
Find the most popular torrents. Return an array of Torrent objects representing the top n torrents. If the cache option is non-None, override the Scraper's default caching settings. Use the prefetch option to hit each Torrent's info page up front (instead of lazy fetching the info on-demand later).
[ "Find", "the", "most", "popular", "torrents", "." ]
python
train
InfoAgeTech/django-core
django_core/db/models/managers.py
https://github.com/InfoAgeTech/django-core/blob/9664a145473b75120bf71e1644e9c8086e7e8955/django_core/db/models/managers.py#L145-L165
def get_next_slug(self, slug, **kwargs): """Gets the next available slug. :param slug: the slug to slugify :param kwargs: additional filter criteria to check for when looking for a unique slug. Example: if the value "my-slug" is already taken, this method will append "-n" to the end of the slug until the next available slug is found. """ original_slug = slug = slugify(slug) count = 0 while not self.is_slug_available(slug=slug, **kwargs): count += 1 slug = '{0}-{1}'.format(original_slug, count) return slug
[ "def", "get_next_slug", "(", "self", ",", "slug", ",", "*", "*", "kwargs", ")", ":", "original_slug", "=", "slug", "=", "slugify", "(", "slug", ")", "count", "=", "0", "while", "not", "self", ".", "is_slug_available", "(", "slug", "=", "slug", ",", "...
Gets the next available slug. :param slug: the slug to slugify :param kwargs: additional filter criteria to check for when looking for a unique slug. Example: if the value "my-slug" is already taken, this method will append "-n" to the end of the slug until the next available slug is found.
[ "Gets", "the", "next", "available", "slug", "." ]
python
train
intel-analytics/BigDL
pyspark/bigdl/optim/optimizer.py
https://github.com/intel-analytics/BigDL/blob/e9c19788285986ab789a2e2998f9a85d7524779f/pyspark/bigdl/optim/optimizer.py#L453-L460
def save(self, path, overWrite): """ save OptimMethod :param path path :param overWrite whether to overwrite """ method=self.value return callBigDlFunc(self.bigdl_type, "saveOptimMethod", method, path, overWrite)
[ "def", "save", "(", "self", ",", "path", ",", "overWrite", ")", ":", "method", "=", "self", ".", "value", "return", "callBigDlFunc", "(", "self", ".", "bigdl_type", ",", "\"saveOptimMethod\"", ",", "method", ",", "path", ",", "overWrite", ")" ]
save OptimMethod :param path path :param overWrite whether to overwrite
[ "save", "OptimMethod", ":", "param", "path", "path", ":", "param", "overWrite", "whether", "to", "overwrite" ]
python
test
materialsproject/pymatgen
pymatgen/core/structure.py
https://github.com/materialsproject/pymatgen/blob/4ca558cf72f8d5f8a1f21dfdfc0181a971c186da/pymatgen/core/structure.py#L2055-L2098
def break_bond(self, ind1, ind2, tol=0.2): """ Returns two molecules based on breaking the bond between atoms at index ind1 and ind2. Args: ind1 (int): Index of first site. ind2 (int): Index of second site. tol (float): Relative tolerance to test. Basically, the code checks if the distance between the sites is less than (1 + tol) * typical bond distances. Defaults to 0.2, i.e., 20% longer. Returns: Two Molecule objects representing the two clusters formed from breaking the bond. """ sites = self._sites clusters = [[sites[ind1]], [sites[ind2]]] sites = [site for i, site in enumerate(sites) if i not in (ind1, ind2)] def belongs_to_cluster(site, cluster): for test_site in cluster: if CovalentBond.is_bonded(site, test_site, tol=tol): return True return False while len(sites) > 0: unmatched = [] for site in sites: for cluster in clusters: if belongs_to_cluster(site, cluster): cluster.append(site) break else: unmatched.append(site) if len(unmatched) == len(sites): raise ValueError("Not all sites are matched!") sites = unmatched return (self.__class__.from_sites(cluster) for cluster in clusters)
[ "def", "break_bond", "(", "self", ",", "ind1", ",", "ind2", ",", "tol", "=", "0.2", ")", ":", "sites", "=", "self", ".", "_sites", "clusters", "=", "[", "[", "sites", "[", "ind1", "]", "]", ",", "[", "sites", "[", "ind2", "]", "]", "]", "sites"...
Returns two molecules based on breaking the bond between atoms at index ind1 and ind2. Args: ind1 (int): Index of first site. ind2 (int): Index of second site. tol (float): Relative tolerance to test. Basically, the code checks if the distance between the sites is less than (1 + tol) * typical bond distances. Defaults to 0.2, i.e., 20% longer. Returns: Two Molecule objects representing the two clusters formed from breaking the bond.
[ "Returns", "two", "molecules", "based", "on", "breaking", "the", "bond", "between", "atoms", "at", "index", "ind1", "and", "ind2", "." ]
python
train
saltstack/salt
salt/states/bluecoat_sslv.py
https://github.com/saltstack/salt/blob/e8541fd6e744ab0df786c0f76102e41631f45d46/salt/states/bluecoat_sslv.py#L50-L95
def distinguished_name_list_exists(name, items): ''' Ensures that a distinguished name list exists with the items provided. name: The name of the module function to execute. name(str): The name of the distinguished names list. items(list): A list of items to ensure exist on the distinguished names list. SLS Example: .. code-block:: yaml MyDistinguishedNameList: bluecoat_sslv.distinguished_name_list_exists: items: - cn=test.com - cn=othersite.com ''' ret = _default_ret(name) req_change = False try: existing_lists = __salt__['bluecoat_sslv.get_distinguished_name_lists']() if name not in existing_lists: __salt__['bluecoat_sslv.add_distinguished_name_list'](name) req_change = True list_members = __salt__['bluecoat_sslv.get_distinguished_name_list'](name) for item in items: if item not in list_members: __salt__['bluecoat_sslv.add_distinguished_name'](name, item) req_change = True if req_change: ret['changes']['before'] = list_members ret['changes']['after'] = __salt__['bluecoat_sslv.get_distinguished_name_list'](name) ret['comment'] = "Updated distinguished name list." else: ret['comment'] = "No changes required." except salt.exceptions.CommandExecutionError as err: ret['result'] = False ret['comment'] = err log.error(err) return ret ret['result'] = True return ret
[ "def", "distinguished_name_list_exists", "(", "name", ",", "items", ")", ":", "ret", "=", "_default_ret", "(", "name", ")", "req_change", "=", "False", "try", ":", "existing_lists", "=", "__salt__", "[", "'bluecoat_sslv.get_distinguished_name_lists'", "]", "(", ")...
Ensures that a distinguished name list exists with the items provided. name: The name of the module function to execute. name(str): The name of the distinguished names list. items(list): A list of items to ensure exist on the distinguished names list. SLS Example: .. code-block:: yaml MyDistinguishedNameList: bluecoat_sslv.distinguished_name_list_exists: items: - cn=test.com - cn=othersite.com
[ "Ensures", "that", "a", "distinguished", "name", "list", "exists", "with", "the", "items", "provided", "." ]
python
train
aws/aws-dynamodb-encryption-python
src/dynamodb_encryption_sdk/internal/formatting/material_description.py
https://github.com/aws/aws-dynamodb-encryption-python/blob/8de3bbe13df39c59b21bf431010f7acfcf629a2f/src/dynamodb_encryption_sdk/internal/formatting/material_description.py#L103-L119
def _read_version(material_description_bytes): # type: (io.BytesIO) -> None """Read the version from the serialized material description and raise an error if it is unknown. :param material_description_bytes: serializezd material description :type material_description_bytes: io.BytesIO :raises InvalidMaterialDescriptionError: if malformed version :raises InvalidMaterialDescriptionVersionError: if unknown version is found """ try: (version,) = unpack_value(">4s", material_description_bytes) except struct.error: message = "Malformed material description version" _LOGGER.exception(message) raise InvalidMaterialDescriptionError(message) if version != _MATERIAL_DESCRIPTION_VERSION: raise InvalidMaterialDescriptionVersionError("Invalid material description version: {}".format(repr(version)))
[ "def", "_read_version", "(", "material_description_bytes", ")", ":", "# type: (io.BytesIO) -> None", "try", ":", "(", "version", ",", ")", "=", "unpack_value", "(", "\">4s\"", ",", "material_description_bytes", ")", "except", "struct", ".", "error", ":", "message", ...
Read the version from the serialized material description and raise an error if it is unknown. :param material_description_bytes: serializezd material description :type material_description_bytes: io.BytesIO :raises InvalidMaterialDescriptionError: if malformed version :raises InvalidMaterialDescriptionVersionError: if unknown version is found
[ "Read", "the", "version", "from", "the", "serialized", "material", "description", "and", "raise", "an", "error", "if", "it", "is", "unknown", "." ]
python
train
ARMmbed/mbed-cloud-sdk-python
src/mbed_cloud/_backends/iam/apis/account_admin_api.py
https://github.com/ARMmbed/mbed-cloud-sdk-python/blob/c0af86fb2cdd4dc7ed26f236139241067d293509/src/mbed_cloud/_backends/iam/apis/account_admin_api.py#L2116-L2137
def update_group_name(self, group_id, body, **kwargs): # noqa: E501 """Update the group name. # noqa: E501 An endpoint for updating a group name. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -d '{\"name\": \"TestGroup2\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.update_group_name(group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str group_id: The ID of the group to be updated. (required) :param GroupUpdateInfo body: Details of the group to be created. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread. """ kwargs['_return_http_data_only'] = True if kwargs.get('asynchronous'): return self.update_group_name_with_http_info(group_id, body, **kwargs) # noqa: E501 else: (data) = self.update_group_name_with_http_info(group_id, body, **kwargs) # noqa: E501 return data
[ "def", "update_group_name", "(", "self", ",", "group_id", ",", "body", ",", "*", "*", "kwargs", ")", ":", "# noqa: E501", "kwargs", "[", "'_return_http_data_only'", "]", "=", "True", "if", "kwargs", ".", "get", "(", "'asynchronous'", ")", ":", "return", "s...
Update the group name. # noqa: E501 An endpoint for updating a group name. **Example usage:** `curl -X PUT https://api.us-east-1.mbedcloud.com/v3/policy-groups/{group-id} -d '{\"name\": \"TestGroup2\"}' -H 'content-type: application/json' -H 'Authorization: Bearer API_KEY'` # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass asynchronous=True >>> thread = api.update_group_name(group_id, body, asynchronous=True) >>> result = thread.get() :param asynchronous bool :param str group_id: The ID of the group to be updated. (required) :param GroupUpdateInfo body: Details of the group to be created. (required) :return: UpdatedResponse If the method is called asynchronously, returns the request thread.
[ "Update", "the", "group", "name", ".", "#", "noqa", ":", "E501" ]
python
train
sernst/cauldron
cauldron/session/display/__init__.py
https://github.com/sernst/cauldron/blob/4086aec9c038c402ea212c79fe8bd0d27104f9cf/cauldron/session/display/__init__.py#L55-L74
def text(value: str, preformatted: bool = False): """ Adds text to the display. If the text is not preformatted, it will be displayed in paragraph format. Preformatted text will be displayed inside a pre tag with a monospace font. :param value: The text to display. :param preformatted: Whether or not to preserve the whitespace display of the text. """ if preformatted: result = render_texts.preformatted_text(value) else: result = render_texts.text(value) r = _get_report() r.append_body(result) r.stdout_interceptor.write_source( '{}\n'.format(textwrap.dedent(value)) )
[ "def", "text", "(", "value", ":", "str", ",", "preformatted", ":", "bool", "=", "False", ")", ":", "if", "preformatted", ":", "result", "=", "render_texts", ".", "preformatted_text", "(", "value", ")", "else", ":", "result", "=", "render_texts", ".", "te...
Adds text to the display. If the text is not preformatted, it will be displayed in paragraph format. Preformatted text will be displayed inside a pre tag with a monospace font. :param value: The text to display. :param preformatted: Whether or not to preserve the whitespace display of the text.
[ "Adds", "text", "to", "the", "display", ".", "If", "the", "text", "is", "not", "preformatted", "it", "will", "be", "displayed", "in", "paragraph", "format", ".", "Preformatted", "text", "will", "be", "displayed", "inside", "a", "pre", "tag", "with", "a", ...
python
train
llllllllll/codetransformer
codetransformer/decompiler/_343.py
https://github.com/llllllllll/codetransformer/blob/c5f551e915df45adc7da7e0b1b635f0cc6a1bb27/codetransformer/decompiler/_343.py#L1061-L1067
def make_call_positionals(stack_builders, count): """ Make the args entry for an ast.Call node. """ out = [make_expr(stack_builders) for _ in range(count)] out.reverse() return out
[ "def", "make_call_positionals", "(", "stack_builders", ",", "count", ")", ":", "out", "=", "[", "make_expr", "(", "stack_builders", ")", "for", "_", "in", "range", "(", "count", ")", "]", "out", ".", "reverse", "(", ")", "return", "out" ]
Make the args entry for an ast.Call node.
[ "Make", "the", "args", "entry", "for", "an", "ast", ".", "Call", "node", "." ]
python
train
chartbeat-labs/swailing
swailing/logger.py
https://github.com/chartbeat-labs/swailing/blob/d55e0dd7af59a2ba93f7c9c46ff56f6a4080b222/swailing/logger.py#L67-L73
def debug(self, msg=None, *args, **kwargs): """Write log at DEBUG level. Same arguments as Python's built-in Logger. """ return self._log(logging.DEBUG, msg, args, kwargs)
[ "def", "debug", "(", "self", ",", "msg", "=", "None", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "self", ".", "_log", "(", "logging", ".", "DEBUG", ",", "msg", ",", "args", ",", "kwargs", ")" ]
Write log at DEBUG level. Same arguments as Python's built-in Logger.
[ "Write", "log", "at", "DEBUG", "level", ".", "Same", "arguments", "as", "Python", "s", "built", "-", "in", "Logger", "." ]
python
train
lsst-sqre/documenteer
documenteer/sphinxext/lssttasks/topiclists.py
https://github.com/lsst-sqre/documenteer/blob/75f02901a80042b28d074df1cc1dca32eb8e38c8/documenteer/sphinxext/lssttasks/topiclists.py#L176-L257
def process_task_topic_list(app, doctree, fromdocname): """Process the ``task_topic_list`` node to generate a rendered listing of Task, Configurable, or Config topics (as determined by the types key of the ``task_topic_list`` node). This is called during the "doctree-resolved" phase so that the ``lsst_task_topcs`` environment attribute is fully set. """ logger = getLogger(__name__) logger.debug('Started process_task_list') env = app.builder.env for node in doctree.traverse(task_topic_list): try: topics = env.lsst_task_topics except AttributeError: message = ( "Environment does not have 'lsst_task_topics', " "can't process the listing." ) logger.warning(message) node.replace_self(nodes.paragraph(text=message)) continue root = node['root_namespace'] # Sort tasks by the topic's class name. # NOTE: if the presentation of the link is changed to the fully # qualified name, with full Python namespace, then the topic_names # should be changed to match that. topic_keys = [k for k, topic in topics.items() if topic['type'] in node['types'] if topic['fully_qualified_name'].startswith(root)] topic_names = [topics[k]['fully_qualified_name'].split('.')[-1] for k in topic_keys] topic_keys = [ k for k, _ in sorted(zip(topic_keys, topic_names), key=lambda pair: pair[1])] if len(topic_keys) == 0: # Fallback if no topics are found p = nodes.paragraph(text='No topics.') node.replace_self(p) continue dl = nodes.definition_list() for key in topic_keys: topic = topics[key] class_name = topic['fully_qualified_name'].split('.')[-1] summary_text = topic['summary_node'][0].astext() # Each topic in the listing is a definition list item. The term is # the linked class name and the description is the summary # sentence from the docstring _or_ the content of the # topic directive dl_item = nodes.definition_list_item() # Can insert an actual reference since the doctree is resolved. 
ref_node = nodes.reference('', '') ref_node['refdocname'] = topic['docname'] ref_node['refuri'] = app.builder.get_relative_uri( fromdocname, topic['docname']) # NOTE: Not appending an anchor to the URI because task topics # are designed to occupy an entire page. link_label = nodes.Text(class_name, class_name) ref_node += link_label term = nodes.term() term += ref_node dl_item += term # We're degrading the summary to plain text to avoid syntax issues # and also because it may be distracting def_node = nodes.definition() def_node += nodes.paragraph(text=summary_text) dl_item += def_node dl += dl_item # Replace the task_list node (a placeholder) with this renderable # content node.replace_self(dl)
[ "def", "process_task_topic_list", "(", "app", ",", "doctree", ",", "fromdocname", ")", ":", "logger", "=", "getLogger", "(", "__name__", ")", "logger", ".", "debug", "(", "'Started process_task_list'", ")", "env", "=", "app", ".", "builder", ".", "env", "for...
Process the ``task_topic_list`` node to generate a rendered listing of Task, Configurable, or Config topics (as determined by the types key of the ``task_topic_list`` node). This is called during the "doctree-resolved" phase so that the ``lsst_task_topcs`` environment attribute is fully set.
[ "Process", "the", "task_topic_list", "node", "to", "generate", "a", "rendered", "listing", "of", "Task", "Configurable", "or", "Config", "topics", "(", "as", "determined", "by", "the", "types", "key", "of", "the", "task_topic_list", "node", ")", "." ]
python
train
dpgaspar/Flask-AppBuilder
flask_appbuilder/security/manager.py
https://github.com/dpgaspar/Flask-AppBuilder/blob/c293734c1b86e176a3ba57ee2deab6676d125576/flask_appbuilder/security/manager.py#L774-L785
def _bind_indirect_user(self, ldap, con): """ If using AUTH_LDAP_BIND_USER bind this user before performing search :param ldap: The ldap module reference :param con: The ldap connection """ indirect_user = self.auth_ldap_bind_user if indirect_user: indirect_password = self.auth_ldap_bind_password log.debug("LDAP indirect bind with: {0}".format(indirect_user)) con.bind_s(indirect_user, indirect_password) log.debug("LDAP BIND indirect OK")
[ "def", "_bind_indirect_user", "(", "self", ",", "ldap", ",", "con", ")", ":", "indirect_user", "=", "self", ".", "auth_ldap_bind_user", "if", "indirect_user", ":", "indirect_password", "=", "self", ".", "auth_ldap_bind_password", "log", ".", "debug", "(", "\"LDA...
If using AUTH_LDAP_BIND_USER bind this user before performing search :param ldap: The ldap module reference :param con: The ldap connection
[ "If", "using", "AUTH_LDAP_BIND_USER", "bind", "this", "user", "before", "performing", "search", ":", "param", "ldap", ":", "The", "ldap", "module", "reference", ":", "param", "con", ":", "The", "ldap", "connection" ]
python
train
ska-sa/montblanc
montblanc/impl/rime/tensorflow/sources/ms_source_provider.py
https://github.com/ska-sa/montblanc/blob/8a2e742e7500bcc6196489b735f87b233075dd2d/montblanc/impl/rime/tensorflow/sources/ms_source_provider.py#L159-L167
def parallactic_angles(self, context): """ parallactic angle data source """ # Time and antenna extents (lt, ut), (la, ua) = context.dim_extents('ntime', 'na') return (mbu.parallactic_angles(self._times[lt:ut], self._antenna_positions[la:ua], self._phase_dir) .reshape(context.shape) .astype(context.dtype))
[ "def", "parallactic_angles", "(", "self", ",", "context", ")", ":", "# Time and antenna extents", "(", "lt", ",", "ut", ")", ",", "(", "la", ",", "ua", ")", "=", "context", ".", "dim_extents", "(", "'ntime'", ",", "'na'", ")", "return", "(", "mbu", "."...
parallactic angle data source
[ "parallactic", "angle", "data", "source" ]
python
train
wakatime/wakatime
wakatime/packages/configparser/__init__.py
https://github.com/wakatime/wakatime/blob/74519ace04e8472f3a3993269963732b9946a01d/wakatime/packages/configparser/__init__.py#L880-L891
def popitem(self): """Remove a section from the parser and return it as a (section_name, section_proxy) tuple. If no section is present, raise KeyError. The section DEFAULT is never returned because it cannot be removed. """ for key in self.sections(): value = self[key] del self[key] return key, value raise KeyError
[ "def", "popitem", "(", "self", ")", ":", "for", "key", "in", "self", ".", "sections", "(", ")", ":", "value", "=", "self", "[", "key", "]", "del", "self", "[", "key", "]", "return", "key", ",", "value", "raise", "KeyError" ]
Remove a section from the parser and return it as a (section_name, section_proxy) tuple. If no section is present, raise KeyError. The section DEFAULT is never returned because it cannot be removed.
[ "Remove", "a", "section", "from", "the", "parser", "and", "return", "it", "as", "a", "(", "section_name", "section_proxy", ")", "tuple", ".", "If", "no", "section", "is", "present", "raise", "KeyError", "." ]
python
train
theonion/django-bulbs
bulbs/contributions/email.py
https://github.com/theonion/django-bulbs/blob/0c0e6e3127a7dc487b96677fab95cacd2b3806da/bulbs/contributions/email.py#L167-L174
def get_contributors(self): """Return a list of contributors with contributions between the start/end dates.""" return User.objects.filter( freelanceprofile__is_freelance=True ).filter( contributions__content__published__gte=self.start, contributions__content__published__lt=self.end ).distinct()
[ "def", "get_contributors", "(", "self", ")", ":", "return", "User", ".", "objects", ".", "filter", "(", "freelanceprofile__is_freelance", "=", "True", ")", ".", "filter", "(", "contributions__content__published__gte", "=", "self", ".", "start", ",", "contributions...
Return a list of contributors with contributions between the start/end dates.
[ "Return", "a", "list", "of", "contributors", "with", "contributions", "between", "the", "start", "/", "end", "dates", "." ]
python
train
CalebBell/ht
ht/conv_internal.py
https://github.com/CalebBell/ht/blob/3097ef9524c4cf0068ad453c17b10ec9ce551eee/ht/conv_internal.py#L1112-L1151
def turbulent_Gowen_Smith(Re, Pr, fd): r'''Calculates internal convection Nusselt number for turbulent flows in pipe according to [2]_ as shown in [1]_. .. math:: Nu = \frac{Re Pr (f/8)^{0.5}} {4.5 + [0.155(Re(f/8)^{0.5})^{0.54} + (8/f)^{0.5}]Pr^{0.5}} Parameters ---------- Re : float Reynolds number, [-] Pr : float Prandtl number, [-] fd : float Darcy friction factor [-] Returns ------- Nu : float Nusselt number, [-] Notes ----- 0.7 ≤ Pr ≤ 14.3 and 10^4 ≤ Re ≤ 5E4 and 0.0021 ≤ eD ≤ 0.095 Examples -------- >>> turbulent_Gowen_Smith(Re=1E5, Pr=1.2, fd=0.0185) 131.72530453824106 References ---------- .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [2] Gowen, R. A., and J. W. Smith. “Turbulent Heat Transfer from Smooth and Rough Surfaces.” International Journal of Heat and Mass Transfer 11, no. 11 (November 1968): 1657–74. doi:10.1016/0017-9310(68)90046-X. ''' return Re*Pr*(fd/8.)**0.5/(4.5 + (0.155*(Re*(fd/8.)**0.5)**0.54 + (8./fd)**0.5)*Pr**0.5)
[ "def", "turbulent_Gowen_Smith", "(", "Re", ",", "Pr", ",", "fd", ")", ":", "return", "Re", "*", "Pr", "*", "(", "fd", "/", "8.", ")", "**", "0.5", "/", "(", "4.5", "+", "(", "0.155", "*", "(", "Re", "*", "(", "fd", "/", "8.", ")", "**", "0....
r'''Calculates internal convection Nusselt number for turbulent flows in pipe according to [2]_ as shown in [1]_. .. math:: Nu = \frac{Re Pr (f/8)^{0.5}} {4.5 + [0.155(Re(f/8)^{0.5})^{0.54} + (8/f)^{0.5}]Pr^{0.5}} Parameters ---------- Re : float Reynolds number, [-] Pr : float Prandtl number, [-] fd : float Darcy friction factor [-] Returns ------- Nu : float Nusselt number, [-] Notes ----- 0.7 ≤ Pr ≤ 14.3 and 10^4 ≤ Re ≤ 5E4 and 0.0021 ≤ eD ≤ 0.095 Examples -------- >>> turbulent_Gowen_Smith(Re=1E5, Pr=1.2, fd=0.0185) 131.72530453824106 References ---------- .. [1] Rohsenow, Warren and James Hartnett and Young Cho. Handbook of Heat Transfer, 3E. New York: McGraw-Hill, 1998. .. [2] Gowen, R. A., and J. W. Smith. “Turbulent Heat Transfer from Smooth and Rough Surfaces.” International Journal of Heat and Mass Transfer 11, no. 11 (November 1968): 1657–74. doi:10.1016/0017-9310(68)90046-X.
[ "r", "Calculates", "internal", "convection", "Nusselt", "number", "for", "turbulent", "flows", "in", "pipe", "according", "to", "[", "2", "]", "_", "as", "shown", "in", "[", "1", "]", "_", "." ]
python
train
rigetti/pyquil
pyquil/api/_qam.py
https://github.com/rigetti/pyquil/blob/ec98e453084b0037d69d8c3245f6822a5422593d/pyquil/api/_qam.py#L45-L59
def load(self, executable): """ Initialize a QAM into a fresh state. :param executable: Load a compiled executable onto the QAM. """ if self.status == 'loaded': warnings.warn("Overwriting previously loaded executable.") assert self.status in ['connected', 'done', 'loaded'] self._variables_shim = {} self._executable = executable self._bitstrings = None self.status = 'loaded' return self
[ "def", "load", "(", "self", ",", "executable", ")", ":", "if", "self", ".", "status", "==", "'loaded'", ":", "warnings", ".", "warn", "(", "\"Overwriting previously loaded executable.\"", ")", "assert", "self", ".", "status", "in", "[", "'connected'", ",", "...
Initialize a QAM into a fresh state. :param executable: Load a compiled executable onto the QAM.
[ "Initialize", "a", "QAM", "into", "a", "fresh", "state", "." ]
python
train
vertexproject/synapse
synapse/lib/syntax.py
https://github.com/vertexproject/synapse/blob/22e67c5a8f6d7caddbcf34b39ab1bd2d6c4a6e0b/synapse/lib/syntax.py#L1461-L1493
def cmdargv(self): ''' cmdargv *must* have leading whitespace to prevent foo@bar from becoming cmdname foo with argv=[@bar] ''' argv = [] while self.more(): # cmdargv *requires* whitespace if not self.ignore(whitespace): break # if we hit a | or a } we're done if self.nextstr('|'): break if self.nextstr('}'): break if not self.nextstr('{'): valu = self.cmdvalu() argv.append(valu) continue start = self.offs self.subquery() text = self.text[start:self.offs] argv.append(text) return s_ast.Const(tuple(argv))
[ "def", "cmdargv", "(", "self", ")", ":", "argv", "=", "[", "]", "while", "self", ".", "more", "(", ")", ":", "# cmdargv *requires* whitespace", "if", "not", "self", ".", "ignore", "(", "whitespace", ")", ":", "break", "# if we hit a | or a } we're done", "if...
cmdargv *must* have leading whitespace to prevent foo@bar from becoming cmdname foo with argv=[@bar]
[ "cmdargv", "*", "must", "*", "have", "leading", "whitespace", "to", "prevent", "foo" ]
python
train
datacats/datacats
datacats/environment.py
https://github.com/datacats/datacats/blob/e4bae503efa997660fb3f34fe166699569653157/datacats/environment.py#L452-L494
def _create_run_ini(self, port, production, output='development.ini', source='development.ini', override_site_url=True): """ Create run/development.ini in datadir with debug and site_url overridden and with correct db passwords inserted """ cp = SafeConfigParser() try: cp.read([self.target + '/' + source]) except ConfigParserError: raise DatacatsError('Error reading development.ini') cp.set('DEFAULT', 'debug', 'false' if production else 'true') if self.site_url: site_url = self.site_url else: if is_boot2docker(): web_address = socket.gethostbyname(docker_host()) else: web_address = self.address site_url = 'http://{}:{}'.format(web_address, port) if override_site_url: cp.set('app:main', 'ckan.site_url', site_url) cp.set('app:main', 'sqlalchemy.url', 'postgresql://ckan:{0}@db:5432/ckan' .format(self.passwords['CKAN_PASSWORD'])) cp.set('app:main', 'ckan.datastore.read_url', 'postgresql://ckan_datastore_readonly:{0}@db:5432/ckan_datastore' .format(self.passwords['DATASTORE_RO_PASSWORD'])) cp.set('app:main', 'ckan.datastore.write_url', 'postgresql://ckan_datastore_readwrite:{0}@db:5432/ckan_datastore' .format(self.passwords['DATASTORE_RW_PASSWORD'])) cp.set('app:main', 'solr_url', 'http://solr:8080/solr') cp.set('app:main', 'ckan.redis.url', 'http://redis:6379') cp.set('app:main', 'beaker.session.secret', self.passwords['BEAKER_SESSION_SECRET']) if not isdir(self.sitedir + '/run'): makedirs(self.sitedir + '/run') # upgrade old datadir with open(self.sitedir + '/run/' + output, 'w') as runini: cp.write(runini)
[ "def", "_create_run_ini", "(", "self", ",", "port", ",", "production", ",", "output", "=", "'development.ini'", ",", "source", "=", "'development.ini'", ",", "override_site_url", "=", "True", ")", ":", "cp", "=", "SafeConfigParser", "(", ")", "try", ":", "cp...
Create run/development.ini in datadir with debug and site_url overridden and with correct db passwords inserted
[ "Create", "run", "/", "development", ".", "ini", "in", "datadir", "with", "debug", "and", "site_url", "overridden", "and", "with", "correct", "db", "passwords", "inserted" ]
python
train
aws/aws-xray-sdk-python
aws_xray_sdk/core/lambda_launcher.py
https://github.com/aws/aws-xray-sdk-python/blob/707358cd3a516d51f2ebf71cf34f00e8d906a667/aws_xray_sdk/core/lambda_launcher.py#L66-L79
def put_subsegment(self, subsegment): """ Refresh the facade segment every time this function is invoked to prevent a new subsegment from being attached to a leaked segment/subsegment. """ current_entity = self.get_trace_entity() if not self._is_subsegment(current_entity) and current_entity.initializing: if global_sdk_config.sdk_enabled(): log.warning("Subsegment %s discarded due to Lambda worker still initializing" % subsegment.name) return current_entity.add_subsegment(subsegment) self._local.entities.append(subsegment)
[ "def", "put_subsegment", "(", "self", ",", "subsegment", ")", ":", "current_entity", "=", "self", ".", "get_trace_entity", "(", ")", "if", "not", "self", ".", "_is_subsegment", "(", "current_entity", ")", "and", "current_entity", ".", "initializing", ":", "if"...
Refresh the facade segment every time this function is invoked to prevent a new subsegment from being attached to a leaked segment/subsegment.
[ "Refresh", "the", "facade", "segment", "every", "time", "this", "function", "is", "invoked", "to", "prevent", "a", "new", "subsegment", "from", "being", "attached", "to", "a", "leaked", "segment", "/", "subsegment", "." ]
python
train
jslang/responsys
responsys/client.py
https://github.com/jslang/responsys/blob/9b355a444c0c75dff41064502c1e2b76dfd5cb93/responsys/client.py#L110-L131
def call(self, method, *args): """ Calls the service method defined with the arguments provided """ try: response = getattr(self.client.service, method)(*args) except (URLError, SSLError) as e: log.exception('Failed to connect to responsys service') raise ConnectError("Request to service timed out") except WebFault as web_fault: fault_name = getattr(web_fault.fault, 'faultstring', None) error = str(web_fault.fault.detail) if fault_name == 'TableFault': raise TableFault(error) if fault_name == 'ListFault': raise ListFault(error) if fault_name == 'API_LIMIT_EXCEEDED': raise ApiLimitError(error) if fault_name == 'AccountFault': raise AccountFault(error) raise ServiceError(web_fault.fault, web_fault.document) return response
[ "def", "call", "(", "self", ",", "method", ",", "*", "args", ")", ":", "try", ":", "response", "=", "getattr", "(", "self", ".", "client", ".", "service", ",", "method", ")", "(", "*", "args", ")", "except", "(", "URLError", ",", "SSLError", ")", ...
Calls the service method defined with the arguments provided
[ "Calls", "the", "service", "method", "defined", "with", "the", "arguments", "provided" ]
python
train
sailthru/sailthru-python-client
sailthru/sailthru_client.py
https://github.com/sailthru/sailthru-python-client/blob/22aa39ba0c5bddd7b8743e24ada331128c0f4f54/sailthru/sailthru_client.py#L154-L162
def get_user(self, idvalue, options=None): """ get user by a given id http://getstarted.sailthru.com/api/user """ options = options or {} data = options.copy() data['id'] = idvalue return self.api_get('user', data)
[ "def", "get_user", "(", "self", ",", "idvalue", ",", "options", "=", "None", ")", ":", "options", "=", "options", "or", "{", "}", "data", "=", "options", ".", "copy", "(", ")", "data", "[", "'id'", "]", "=", "idvalue", "return", "self", ".", "api_g...
get user by a given id http://getstarted.sailthru.com/api/user
[ "get", "user", "by", "a", "given", "id", "http", ":", "//", "getstarted", ".", "sailthru", ".", "com", "/", "api", "/", "user" ]
python
train
adafruit/Adafruit_Python_PureIO
Adafruit_PureIO/smbus.py
https://github.com/adafruit/Adafruit_Python_PureIO/blob/6f4976d91c52d70b67b28bba75a429b5328a52c1/Adafruit_PureIO/smbus.py#L258-L268
def write_word_data(self, addr, cmd, val): """Write a word (2 bytes) of data to the specified cmd register of the device. Note that this will write the data in the endianness of the processor running Python (typically little endian)! """ assert self._device is not None, 'Bus must be opened before operations are made against it!' # Construct a string of data to send with the command register and word value. data = struct.pack('=BH', cmd & 0xFF, val & 0xFFFF) # Send the data to the device. self._select_device(addr) self._device.write(data)
[ "def", "write_word_data", "(", "self", ",", "addr", ",", "cmd", ",", "val", ")", ":", "assert", "self", ".", "_device", "is", "not", "None", ",", "'Bus must be opened before operations are made against it!'", "# Construct a string of data to send with the command register a...
Write a word (2 bytes) of data to the specified cmd register of the device. Note that this will write the data in the endianness of the processor running Python (typically little endian)!
[ "Write", "a", "word", "(", "2", "bytes", ")", "of", "data", "to", "the", "specified", "cmd", "register", "of", "the", "device", ".", "Note", "that", "this", "will", "write", "the", "data", "in", "the", "endianness", "of", "the", "processor", "running", ...
python
test
crytic/pyevmasm
pyevmasm/evmasm.py
https://github.com/crytic/pyevmasm/blob/d27daf19a36d630a31499e783b716cf1165798d8/pyevmasm/evmasm.py#L571-L596
def assemble_hex(asmcode, pc=0, fork=DEFAULT_FORK): """ Assemble an EVM program :param asmcode: an evm assembler program :type asmcode: str | iterator[Instruction] :param pc: program counter of the first instruction(optional) :type pc: int :param fork: fork name (optional) :type fork: str :return: the hex representation of the bytecode :rtype: str Example use:: >>> assemble_hex('''PUSH1 0x60\n \ BLOCKHASH\n \ MSTORE\n \ PUSH1 0x2\n \ PUSH2 0x100\n \ ''') ... "0x6060604052600261010" """ if isinstance(asmcode, list): return '0x' + hexlify(b''.join([x.bytes for x in asmcode])).decode('ascii') return '0x' + hexlify(assemble(asmcode, pc=pc, fork=fork)).decode('ascii')
[ "def", "assemble_hex", "(", "asmcode", ",", "pc", "=", "0", ",", "fork", "=", "DEFAULT_FORK", ")", ":", "if", "isinstance", "(", "asmcode", ",", "list", ")", ":", "return", "'0x'", "+", "hexlify", "(", "b''", ".", "join", "(", "[", "x", ".", "bytes...
Assemble an EVM program :param asmcode: an evm assembler program :type asmcode: str | iterator[Instruction] :param pc: program counter of the first instruction(optional) :type pc: int :param fork: fork name (optional) :type fork: str :return: the hex representation of the bytecode :rtype: str Example use:: >>> assemble_hex('''PUSH1 0x60\n \ BLOCKHASH\n \ MSTORE\n \ PUSH1 0x2\n \ PUSH2 0x100\n \ ''') ... "0x6060604052600261010"
[ "Assemble", "an", "EVM", "program" ]
python
valid
goose3/goose3
goose3/extractors/title.py
https://github.com/goose3/goose3/blob/e6994b1b1826af2720a091d1bff5ca15594f558d/goose3/extractors/title.py#L33-L77
def clean_title(self, title): """Clean title with the use of og:site_name in this case try to get rid of site name and use TITLE_SPLITTERS to reformat title """ # check if we have the site name in opengraph data if "site_name" in list(self.article.opengraph.keys()): site_name = self.article.opengraph['site_name'] # remove the site name from title title = title.replace(site_name, '').strip() elif (self.article.schema and "publisher" in self.article.schema and "name" in self.article.schema["publisher"]): site_name = self.article.schema["publisher"]["name"] # remove the site name from title title = title.replace(site_name, '').strip() # try to remove the domain from url if self.article.domain: pattern = re.compile(self.article.domain, re.IGNORECASE) title = pattern.sub("", title).strip() # split the title in words # TechCrunch | my wonderfull article # my wonderfull article | TechCrunch title_words = title.split() # check if first letter is in TITLE_SPLITTERS # if so remove it if title_words and title_words[0] in TITLE_SPLITTERS: title_words.pop(0) # check for a title that is empty or consists of only a # title splitter to avoid a IndexError below if not title_words: return "" # check if last letter is in TITLE_SPLITTERS # if so remove it if title_words[-1] in TITLE_SPLITTERS: title_words.pop(-1) # rebuild the title title = " ".join(title_words).strip() return title
[ "def", "clean_title", "(", "self", ",", "title", ")", ":", "# check if we have the site name in opengraph data", "if", "\"site_name\"", "in", "list", "(", "self", ".", "article", ".", "opengraph", ".", "keys", "(", ")", ")", ":", "site_name", "=", "self", ".",...
Clean title with the use of og:site_name in this case try to get rid of site name and use TITLE_SPLITTERS to reformat title
[ "Clean", "title", "with", "the", "use", "of", "og", ":", "site_name", "in", "this", "case", "try", "to", "get", "rid", "of", "site", "name", "and", "use", "TITLE_SPLITTERS", "to", "reformat", "title" ]
python
valid
mitsei/dlkit
dlkit/json_/assessment/managers.py
https://github.com/mitsei/dlkit/blob/445f968a175d61c8d92c0f617a3c17dc1dc7c584/dlkit/json_/assessment/managers.py#L1160-L1182
def get_assessment_admin_session_for_bank(self, bank_id): """Gets the ``OsidSession`` associated with the assessment admin service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentAdminSession) - ``an _assessment_admin_session`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_admin()`` and ``supports_visible_federation()`` are ``true``.* """ if not self.supports_assessment_admin(): raise errors.Unimplemented() ## # Also include check to see if the catalog Id is found otherwise raise errors.NotFound ## # pylint: disable=no-member return sessions.AssessmentAdminSession(bank_id, runtime=self._runtime)
[ "def", "get_assessment_admin_session_for_bank", "(", "self", ",", "bank_id", ")", ":", "if", "not", "self", ".", "supports_assessment_admin", "(", ")", ":", "raise", "errors", ".", "Unimplemented", "(", ")", "##", "# Also include check to see if the catalog Id is found ...
Gets the ``OsidSession`` associated with the assessment admin service for the given bank. arg: bank_id (osid.id.Id): the ``Id`` of the bank return: (osid.assessment.AssessmentAdminSession) - ``an _assessment_admin_session`` raise: NotFound - ``bank_id`` not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - ``unable to complete request`` raise: Unimplemented - ``supports_assessment_admin()`` or ``supports_visible_federation()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_assessment_admin()`` and ``supports_visible_federation()`` are ``true``.*
[ "Gets", "the", "OsidSession", "associated", "with", "the", "assessment", "admin", "service", "for", "the", "given", "bank", "." ]
python
train